column            type                     values / lengths
repo_name         string (categorical)     6 distinct values
pr_number         int64                    512 to 78.9k
pr_title          string                   3 to 144 chars
pr_description    string                   0 to 30.3k chars
author            string                   2 to 21 chars
date_created      timestamp[ns, tz=UTC]    -
date_merged       timestamp[ns, tz=UTC]    -
previous_commit   string                   40 chars
pr_commit         string                   40 chars
query             string                   17 to 30.4k chars
filepath          string                   9 to 210 chars
before_content    string                   0 to 112M chars
after_content     string                   0 to 112M chars
label             int64                    -1 to 1
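The columns above describe one record per (pull request, file) pair. As a rough illustration only, here is a minimal sketch of loading rows with this schema from a local Parquet file and summarising them; pandas and the placeholder file name pr_files.parquet are assumptions, not something specified by the schema itself.

import pandas as pd

# Placeholder path; the actual storage format/location is not given above.
df = pd.read_parquet("pr_files.parquet")

# A PR appears once per file it touches; group rows to get per-PR file counts.
per_pr = df.groupby(["repo_name", "pr_number"]).agg(
    files=("filepath", "count"),
    title=("pr_title", "first"),
)
print(per_pr.head())

# label is an int64 ranging from -1 to 1; show how many rows carry each value.
print(df["label"].value_counts())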
repo_name: dotnet/runtime
pr_number: 66,268
pr_title: [mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
pr_description: …nvert them to TO_I4/TO_I8 in the front end.
author: vargaz
date_created: 2022-03-06T20:28:39Z
date_merged: 2022-03-08T15:18:15Z
previous_commit: f396c3496a905451bcb4649c44c6d2e627690d05
pr_commit: 3959a4a9beeb292816008309e12b6d7150c05235
query: [mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
filepath: ./src/libraries/Common/src/System/Net/Logging/NetEventSource.Common.cs
before_content / after_content (both reproduced in full below):
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #if DEBUG // Uncomment to enable runtime checks to help validate that NetEventSource isn't being misused // in a way that will cause performance problems, e.g. unexpected boxing of value types. //#define DEBUG_NETEVENTSOURCE_MISUSE #endif using System.Collections; using System.Diagnostics; using System.Diagnostics.Tracing; using System.Diagnostics.CodeAnalysis; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; #pragma warning disable CA1823 // not all IDs are used by all partial providers namespace System.Net { // Implementation: // This partial file is meant to be consumed into each System.Net.* assembly that needs to log. Each such assembly also provides // its own NetEventSource partial class that adds an appropriate [EventSource] attribute, giving it a unique name for that assembly. // Those partials can then also add additional events if needed, starting numbering from the NextAvailableEventId defined by this partial. // Usage: // - Operations that may allocate (e.g. boxing a value type, using string interpolation, etc.) or that may have computations // at call sites should guard access like: // if (NetEventSource.Log.IsEnabled()) NetEventSource.Info(null, $"Found certificate: {cert}"); // info logging with a formattable string // - Operations that have zero allocations / measurable computations at call sites can use a simpler pattern, calling methods like: // NetEventSource.Info(this, "literal string"); // arbitrary message with a literal string // Debug.Asserts inside the logging methods will help to flag some misuse if the DEBUG_NETEVENTSOURCE_MISUSE compilation constant is defined. // However, because it can be difficult by observation to understand all of the costs involved, guarding can be done everywhere. // - Messages can be strings, formattable strings, or any other object. Objects (including those used in formattable strings) have special // formatting applied, controlled by the Format method. Partial specializations can also override this formatting by implementing a partial // method that takes an object and optionally provides a string representation of it, in case a particular library wants to customize further. /// <summary>Provides logging facilities for System.Net libraries.</summary> internal sealed partial class NetEventSource : EventSource { #if !ES_BUILD_STANDALONE private const string EventSourceSuppressMessage = "Parameters to this method are primitive and are trimmer safe"; #endif /// <summary>The single event source instance to use for all logging.</summary> public static readonly NetEventSource Log = new NetEventSource(); #region Metadata public static class Keywords { public const EventKeywords Default = (EventKeywords)0x0001; public const EventKeywords Debug = (EventKeywords)0x0002; // No longer used: // EnterExit = (EventKeywords)0x0004; } private const string MissingMember = "(?)"; private const string NullInstance = "(null)"; private const string StaticMethodObject = "(static)"; private const string NoParameters = ""; private const int MaxDumpSize = 1024; // No longer used: // EnterEventId = 1; // ExitEventId = 2; private const int AssociateEventId = 3; private const int InfoEventId = 4; private const int ErrorEventId = 5; private const int VerboseEventId = 6; private const int DumpArrayEventId = 7; // These events are implemented in NetEventSource.Security.cs. 
// Define the ids here so that projects that include NetEventSource.Security.cs will not have conflicts. private const int EnumerateSecurityPackagesId = 8; private const int SspiPackageNotFoundId = 9; private const int AcquireDefaultCredentialId = 10; private const int AcquireCredentialsHandleId = 11; private const int InitializeSecurityContextId = 12; private const int SecurityContextInputBufferId = 13; private const int SecurityContextInputBuffersId = 14; private const int AcceptSecuritContextId = 15; private const int OperationReturnedSomethingId = 16; private const int NextAvailableEventId = 17; // Update this value whenever new events are added. Derived types should base all events off of this to avoid conflicts. #endregion #region Events #region Info /// <summary>Logs an information message.</summary> /// <param name="thisOrContextObject">`this`, or another object that serves to provide context for the operation.</param> /// <param name="formattableString">The message to be logged.</param> /// <param name="memberName">The calling member.</param> [NonEvent] public static void Info(object? thisOrContextObject, FormattableString? formattableString = null, [CallerMemberName] string? memberName = null) { DebugValidateArg(thisOrContextObject); DebugValidateArg(formattableString); if (Log.IsEnabled()) Log.Info(IdOf(thisOrContextObject), memberName, formattableString != null ? Format(formattableString) : NoParameters); } /// <summary>Logs an information message.</summary> /// <param name="thisOrContextObject">`this`, or another object that serves to provide context for the operation.</param> /// <param name="message">The message to be logged.</param> /// <param name="memberName">The calling member.</param> [NonEvent] public static void Info(object? thisOrContextObject, object? message, [CallerMemberName] string? memberName = null) { DebugValidateArg(thisOrContextObject); DebugValidateArg(message); if (Log.IsEnabled()) Log.Info(IdOf(thisOrContextObject), memberName, Format(message).ToString()); } [Event(InfoEventId, Level = EventLevel.Informational, Keywords = Keywords.Default)] private void Info(string thisOrContextObject, string? memberName, string? message) => WriteEvent(InfoEventId, thisOrContextObject, memberName ?? MissingMember, message); #endregion #region Error /// <summary>Logs an error message.</summary> /// <param name="thisOrContextObject">`this`, or another object that serves to provide context for the operation.</param> /// <param name="formattableString">The message to be logged.</param> /// <param name="memberName">The calling member.</param> [NonEvent] public static void Error(object? thisOrContextObject, FormattableString formattableString, [CallerMemberName] string? memberName = null) { DebugValidateArg(thisOrContextObject); DebugValidateArg(formattableString); if (Log.IsEnabled()) Log.ErrorMessage(IdOf(thisOrContextObject), memberName, Format(formattableString)); } /// <summary>Logs an error message.</summary> /// <param name="thisOrContextObject">`this`, or another object that serves to provide context for the operation.</param> /// <param name="message">The message to be logged.</param> /// <param name="memberName">The calling member.</param> [NonEvent] public static void Error(object? thisOrContextObject, object message, [CallerMemberName] string? 
memberName = null) { DebugValidateArg(thisOrContextObject); DebugValidateArg(message); if (Log.IsEnabled()) Log.ErrorMessage(IdOf(thisOrContextObject), memberName, Format(message).ToString()); } [Event(ErrorEventId, Level = EventLevel.Error, Keywords = Keywords.Default)] private void ErrorMessage(string thisOrContextObject, string? memberName, string? message) => WriteEvent(ErrorEventId, thisOrContextObject, memberName ?? MissingMember, message); #endregion #region Verbose /// <summary>Logs an info message at verbose mode.</summary> /// <param name="thisOrContextObject">`this`, or another object that serves to provide context for the operation.</param> /// <param name="formattableString">The message to be logged.</param> /// <param name="memberName">The calling member.</param> [NonEvent] public static void Verbose(object? thisOrContextObject, FormattableString formattableString, [CallerMemberName] string? memberName = null) { DebugValidateArg(thisOrContextObject); DebugValidateArg(formattableString); if (Log.IsEnabled()) Log.VerboseMessage(IdOf(thisOrContextObject), memberName, Format(formattableString)); } /// <summary>Logs an info at verbose mode.</summary> /// <param name="thisOrContextObject">`this`, or another object that serves to provide context for the operation.</param> /// <param name="message">The message to be logged.</param> /// <param name="memberName">The calling member.</param> [NonEvent] public static void Verbose(object? thisOrContextObject, object message, [CallerMemberName] string? memberName = null) { DebugValidateArg(thisOrContextObject); DebugValidateArg(message); if (Log.IsEnabled()) Log.VerboseMessage(IdOf(thisOrContextObject), memberName, Format(message).ToString()); } [Event(VerboseEventId, Level = EventLevel.Verbose, Keywords = Keywords.Default)] private void VerboseMessage(string thisOrContextObject, string? memberName, string? message) => WriteEvent(VerboseEventId, thisOrContextObject, memberName ?? MissingMember, message); #endregion #region DumpBuffer /// <summary>Logs the contents of a buffer.</summary> /// <param name="thisOrContextObject">`this`, or another object that serves to provide context for the operation.</param> /// <param name="buffer">The buffer to be logged.</param> /// <param name="memberName">The calling member.</param> [NonEvent] public static void DumpBuffer(object? thisOrContextObject, byte[] buffer, [CallerMemberName] string? memberName = null) { DumpBuffer(thisOrContextObject, buffer, 0, buffer.Length, memberName); } /// <summary>Logs the contents of a buffer.</summary> /// <param name="thisOrContextObject">`this`, or another object that serves to provide context for the operation.</param> /// <param name="buffer">The buffer to be logged.</param> /// <param name="offset">The starting offset from which to log.</param> /// <param name="count">The number of bytes to log.</param> /// <param name="memberName">The calling member.</param> [NonEvent] public static void DumpBuffer(object? thisOrContextObject, byte[] buffer, int offset, int count, [CallerMemberName] string? 
memberName = null) { if (Log.IsEnabled() && offset >= 0 && offset <= buffer.Length - count) { count = Math.Min(count, MaxDumpSize); byte[] slice = buffer; if (offset != 0 || count != buffer.Length) { slice = new byte[count]; Buffer.BlockCopy(buffer, offset, slice, 0, count); } Log.DumpBuffer(IdOf(thisOrContextObject), memberName, slice); } } /// <summary>Logs the contents of a buffer.</summary> /// <param name="thisOrContextObject">`this`, or another object that serves to provide context for the operation.</param> /// <param name="bufferPtr">The starting location of the buffer to be logged.</param> /// <param name="count">The number of bytes to log.</param> /// <param name="memberName">The calling member.</param> [NonEvent] public static unsafe void DumpBuffer(object? thisOrContextObject, IntPtr bufferPtr, int count, [CallerMemberName] string? memberName = null) { Debug.Assert(bufferPtr != IntPtr.Zero); Debug.Assert(count >= 0); if (Log.IsEnabled()) { var buffer = new byte[Math.Min(count, MaxDumpSize)]; fixed (byte* targetPtr = buffer) { Buffer.MemoryCopy((byte*)bufferPtr, targetPtr, buffer.Length, buffer.Length); } Log.DumpBuffer(IdOf(thisOrContextObject), memberName, buffer); } } [Event(DumpArrayEventId, Level = EventLevel.Verbose, Keywords = Keywords.Debug)] private void DumpBuffer(string thisOrContextObject, string? memberName, byte[] buffer) => WriteEvent(DumpArrayEventId, thisOrContextObject, memberName ?? MissingMember, buffer); #endregion #region Associate /// <summary>Logs a relationship between two objects.</summary> /// <param name="first">The first object.</param> /// <param name="second">The second object.</param> /// <param name="memberName">The calling member.</param> [NonEvent] public static void Associate(object first, object second, [CallerMemberName] string? memberName = null) { DebugValidateArg(first); DebugValidateArg(second); if (Log.IsEnabled()) Log.Associate(IdOf(first), memberName, IdOf(first), IdOf(second)); } /// <summary>Logs a relationship between two objects.</summary> /// <param name="thisOrContextObject">`this`, or another object that serves to provide context for the operation.</param> /// <param name="first">The first object.</param> /// <param name="second">The second object.</param> /// <param name="memberName">The calling member.</param> [NonEvent] public static void Associate(object? thisOrContextObject, object first, object second, [CallerMemberName] string? memberName = null) { DebugValidateArg(thisOrContextObject); DebugValidateArg(first); DebugValidateArg(second); if (Log.IsEnabled()) Log.Associate(IdOf(thisOrContextObject), memberName, IdOf(first), IdOf(second)); } [Event(AssociateEventId, Level = EventLevel.Informational, Keywords = Keywords.Default, Message = "[{2}]<-->[{3}]")] private void Associate(string thisOrContextObject, string? memberName, string first, string second) => WriteEvent(AssociateEventId, thisOrContextObject, memberName ?? MissingMember, first, second); #endregion #endregion #region Helpers [Conditional("DEBUG_NETEVENTSOURCE_MISUSE")] private static void DebugValidateArg(object? arg) { if (!Log.IsEnabled()) { Debug.Assert(!(arg is ValueType), $"Should not be passing value type {arg?.GetType()} to logging without IsEnabled check"); Debug.Assert(!(arg is FormattableString), $"Should not be formatting FormattableString \"{arg}\" if tracing isn't enabled"); } } [Conditional("DEBUG_NETEVENTSOURCE_MISUSE")] private static void DebugValidateArg(FormattableString? 
arg) { Debug.Assert(Log.IsEnabled() || arg == null, $"Should not be formatting FormattableString \"{arg}\" if tracing isn't enabled"); } [NonEvent] public static string IdOf(object? value) => value != null ? value.GetType().Name + "#" + GetHashCode(value) : NullInstance; [NonEvent] public static int GetHashCode(object? value) => value?.GetHashCode() ?? 0; [NonEvent] public static object Format(object? value) { // If it's null, return a known string for null values if (value == null) { return NullInstance; } // Give another partial implementation a chance to provide its own string representation string? result = null; AdditionalCustomizedToString(value, ref result); if (result != null) { return result; } // Format arrays with their element type name and length if (value is Array arr) { return $"{arr.GetType().GetElementType()}[{((Array)value).Length}]"; } // Format ICollections as the name and count if (value is ICollection c) { return $"{c.GetType().Name}({c.Count})"; } // Format SafeHandles as their type, hash code, and pointer value if (value is SafeHandle handle) { return $"{handle.GetType().Name}:{handle.GetHashCode()}(0x{handle.DangerousGetHandle():X})"; } // Format IntPtrs as hex if (value is IntPtr) { return $"0x{value:X}"; } // If the string representation of the instance would just be its type name, // use its id instead. string? toString = value.ToString(); if (toString == null || toString == value.GetType().FullName) { return IdOf(value); } // Otherwise, return the original object so that the caller does default formatting. return value; } [NonEvent] private static string Format(FormattableString s) { switch (s.ArgumentCount) { case 0: return s.Format; case 1: return string.Format(s.Format, Format(s.GetArgument(0))); case 2: return string.Format(s.Format, Format(s.GetArgument(0)), Format(s.GetArgument(1))); case 3: return string.Format(s.Format, Format(s.GetArgument(0)), Format(s.GetArgument(1)), Format(s.GetArgument(2))); default: object?[] args = s.GetArguments(); object[] formattedArgs = new object[args.Length]; for (int i = 0; i < args.Length; i++) { formattedArgs[i] = Format(args[i]); } return string.Format(s.Format, formattedArgs); } } static partial void AdditionalCustomizedToString<T>(T value, ref string? result); #endregion #region Custom WriteEvent overloads #if !ES_BUILD_STANDALONE [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2026:UnrecognizedReflectionPattern", Justification = EventSourceSuppressMessage)] #endif [NonEvent] private unsafe void WriteEvent(int eventId, string? arg1, string? arg2, string? arg3, string? 
arg4) { if (Log.IsEnabled()) { if (arg1 == null) arg1 = ""; if (arg2 == null) arg2 = ""; if (arg3 == null) arg3 = ""; if (arg4 == null) arg4 = ""; fixed (char* string1Bytes = arg1) fixed (char* string2Bytes = arg2) fixed (char* string3Bytes = arg3) fixed (char* string4Bytes = arg4) { const int NumEventDatas = 4; var descrs = stackalloc EventData[NumEventDatas]; descrs[0] = new EventData { DataPointer = (IntPtr)string1Bytes, Size = ((arg1.Length + 1) * 2) }; descrs[1] = new EventData { DataPointer = (IntPtr)string2Bytes, Size = ((arg2.Length + 1) * 2) }; descrs[2] = new EventData { DataPointer = (IntPtr)string3Bytes, Size = ((arg3.Length + 1) * 2) }; descrs[3] = new EventData { DataPointer = (IntPtr)string4Bytes, Size = ((arg4.Length + 1) * 2) }; WriteEventCore(eventId, NumEventDatas, descrs); } } } #if !ES_BUILD_STANDALONE [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2026:UnrecognizedReflectionPattern", Justification = EventSourceSuppressMessage)] #endif [NonEvent] private unsafe void WriteEvent(int eventId, string? arg1, string? arg2, byte[]? arg3) { if (Log.IsEnabled()) { if (arg1 == null) arg1 = ""; if (arg2 == null) arg2 = ""; if (arg3 == null) arg3 = Array.Empty<byte>(); fixed (char* arg1Ptr = arg1) fixed (char* arg2Ptr = arg2) fixed (byte* arg3Ptr = arg3) { int bufferLength = arg3.Length; const int NumEventDatas = 4; var descrs = stackalloc EventData[NumEventDatas]; descrs[0] = new EventData { DataPointer = (IntPtr)arg1Ptr, Size = (arg1.Length + 1) * sizeof(char) }; descrs[1] = new EventData { DataPointer = (IntPtr)arg2Ptr, Size = (arg2.Length + 1) * sizeof(char) }; descrs[2] = new EventData { DataPointer = (IntPtr)(&bufferLength), Size = 4 }; descrs[3] = new EventData { DataPointer = (IntPtr)arg3Ptr, Size = bufferLength }; WriteEventCore(eventId, NumEventDatas, descrs); } } } #if !ES_BUILD_STANDALONE [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2026:UnrecognizedReflectionPattern", Justification = EventSourceSuppressMessage)] #endif [NonEvent] private unsafe void WriteEvent(int eventId, string? arg1, int arg2, int arg3, int arg4) { if (Log.IsEnabled()) { if (arg1 == null) arg1 = ""; fixed (char* arg1Ptr = arg1) { const int NumEventDatas = 4; var descrs = stackalloc EventData[NumEventDatas]; descrs[0] = new EventData { DataPointer = (IntPtr)(arg1Ptr), Size = (arg1.Length + 1) * sizeof(char) }; descrs[1] = new EventData { DataPointer = (IntPtr)(&arg2), Size = sizeof(int) }; descrs[2] = new EventData { DataPointer = (IntPtr)(&arg3), Size = sizeof(int) }; descrs[3] = new EventData { DataPointer = (IntPtr)(&arg4), Size = sizeof(int) }; WriteEventCore(eventId, NumEventDatas, descrs); } } } #if !ES_BUILD_STANDALONE [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2026:UnrecognizedReflectionPattern", Justification = EventSourceSuppressMessage)] #endif [NonEvent] private unsafe void WriteEvent(int eventId, string? arg1, int arg2, string? 
arg3) { if (Log.IsEnabled()) { if (arg1 == null) arg1 = ""; if (arg3 == null) arg3 = ""; fixed (char* arg1Ptr = arg1) fixed (char* arg3Ptr = arg3) { const int NumEventDatas = 3; var descrs = stackalloc EventData[NumEventDatas]; descrs[0] = new EventData { DataPointer = (IntPtr)(arg1Ptr), Size = (arg1.Length + 1) * sizeof(char) }; descrs[1] = new EventData { DataPointer = (IntPtr)(&arg2), Size = sizeof(int) }; descrs[2] = new EventData { DataPointer = (IntPtr)(arg3Ptr), Size = (arg3.Length + 1) * sizeof(char) }; WriteEventCore(eventId, NumEventDatas, descrs); } } } #if !ES_BUILD_STANDALONE [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2026:UnrecognizedReflectionPattern", Justification = EventSourceSuppressMessage)] #endif [NonEvent] private unsafe void WriteEvent(int eventId, string? arg1, string? arg2, int arg3) { if (Log.IsEnabled()) { if (arg1 == null) arg1 = ""; if (arg2 == null) arg2 = ""; fixed (char* arg1Ptr = arg1) fixed (char* arg2Ptr = arg2) { const int NumEventDatas = 3; var descrs = stackalloc EventData[NumEventDatas]; descrs[0] = new EventData { DataPointer = (IntPtr)(arg1Ptr), Size = (arg1.Length + 1) * sizeof(char) }; descrs[1] = new EventData { DataPointer = (IntPtr)(arg2Ptr), Size = (arg2.Length + 1) * sizeof(char) }; descrs[2] = new EventData { DataPointer = (IntPtr)(&arg3), Size = sizeof(int) }; WriteEventCore(eventId, NumEventDatas, descrs); } } } #if !ES_BUILD_STANDALONE [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2026:UnrecognizedReflectionPattern", Justification = EventSourceSuppressMessage)] #endif [NonEvent] private unsafe void WriteEvent(int eventId, string? arg1, string? arg2, string? arg3, int arg4) { if (Log.IsEnabled()) { if (arg1 == null) arg1 = ""; if (arg2 == null) arg2 = ""; if (arg3 == null) arg3 = ""; fixed (char* arg1Ptr = arg1) fixed (char* arg2Ptr = arg2) fixed (char* arg3Ptr = arg3) { const int NumEventDatas = 4; var descrs = stackalloc EventData[NumEventDatas]; descrs[0] = new EventData { DataPointer = (IntPtr)(arg1Ptr), Size = (arg1.Length + 1) * sizeof(char) }; descrs[1] = new EventData { DataPointer = (IntPtr)(arg2Ptr), Size = (arg2.Length + 1) * sizeof(char) }; descrs[2] = new EventData { DataPointer = (IntPtr)(arg3Ptr), Size = (arg3.Length + 1) * sizeof(char) }; descrs[3] = new EventData { DataPointer = (IntPtr)(&arg4), Size = sizeof(int) }; WriteEventCore(eventId, NumEventDatas, descrs); } } } #if !ES_BUILD_STANDALONE [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2026:UnrecognizedReflectionPattern", Justification = EventSourceSuppressMessage)] #endif [NonEvent] private unsafe void WriteEvent(int eventId, string arg1, int arg2, int arg3, int arg4, int arg5, int arg6, int arg7, int arg8) { if (Log.IsEnabled()) { if (arg1 == null) arg1 = ""; fixed (char* arg1Ptr = arg1) { const int NumEventDatas = 8; var descrs = stackalloc EventData[NumEventDatas]; descrs[0] = new EventData { DataPointer = (IntPtr)(arg1Ptr), Size = (arg1.Length + 1) * sizeof(char) }; descrs[1] = new EventData { DataPointer = (IntPtr)(&arg2), Size = sizeof(int) }; descrs[2] = new EventData { DataPointer = (IntPtr)(&arg3), Size = sizeof(int) }; descrs[3] = new EventData { DataPointer = (IntPtr)(&arg4), Size = sizeof(int) }; descrs[4] = new EventData { DataPointer = (IntPtr)(&arg5), Size = sizeof(int) }; descrs[5] = new EventData { DataPointer = (IntPtr)(&arg6), Size = sizeof(int) }; descrs[6] = new EventData { DataPointer = (IntPtr)(&arg7), Size = sizeof(int) }; descrs[7] = new EventData { DataPointer = (IntPtr)(&arg8), Size = sizeof(int) }; 
WriteEventCore(eventId, NumEventDatas, descrs); } } } #if !ES_BUILD_STANDALONE [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2026:UnrecognizedReflectionPattern", Justification = EventSourceSuppressMessage)] #endif [NonEvent] private unsafe void WriteEvent(int eventId, string arg1, string arg2, int arg3, int arg4, int arg5) { if (Log.IsEnabled()) { if (arg1 == null) arg1 = ""; if (arg2 == null) arg2 = ""; fixed (char* arg1Ptr = arg1) fixed (char* arg2Ptr = arg2) { const int NumEventDatas = 5; var descrs = stackalloc EventData[NumEventDatas]; descrs[0] = new EventData { DataPointer = (IntPtr)(arg1Ptr), Size = (arg1.Length + 1) * sizeof(char) }; descrs[1] = new EventData { DataPointer = (IntPtr)(arg2Ptr), Size = (arg2.Length + 1) * sizeof(char) }; descrs[2] = new EventData { DataPointer = (IntPtr)(&arg3), Size = sizeof(int) }; descrs[3] = new EventData { DataPointer = (IntPtr)(&arg4), Size = sizeof(int) }; descrs[4] = new EventData { DataPointer = (IntPtr)(&arg5), Size = sizeof(int) }; WriteEventCore(eventId, NumEventDatas, descrs); } } } #endregion } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #if DEBUG // Uncomment to enable runtime checks to help validate that NetEventSource isn't being misused // in a way that will cause performance problems, e.g. unexpected boxing of value types. //#define DEBUG_NETEVENTSOURCE_MISUSE #endif using System.Collections; using System.Diagnostics; using System.Diagnostics.Tracing; using System.Diagnostics.CodeAnalysis; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; #pragma warning disable CA1823 // not all IDs are used by all partial providers namespace System.Net { // Implementation: // This partial file is meant to be consumed into each System.Net.* assembly that needs to log. Each such assembly also provides // its own NetEventSource partial class that adds an appropriate [EventSource] attribute, giving it a unique name for that assembly. // Those partials can then also add additional events if needed, starting numbering from the NextAvailableEventId defined by this partial. // Usage: // - Operations that may allocate (e.g. boxing a value type, using string interpolation, etc.) or that may have computations // at call sites should guard access like: // if (NetEventSource.Log.IsEnabled()) NetEventSource.Info(null, $"Found certificate: {cert}"); // info logging with a formattable string // - Operations that have zero allocations / measurable computations at call sites can use a simpler pattern, calling methods like: // NetEventSource.Info(this, "literal string"); // arbitrary message with a literal string // Debug.Asserts inside the logging methods will help to flag some misuse if the DEBUG_NETEVENTSOURCE_MISUSE compilation constant is defined. // However, because it can be difficult by observation to understand all of the costs involved, guarding can be done everywhere. // - Messages can be strings, formattable strings, or any other object. Objects (including those used in formattable strings) have special // formatting applied, controlled by the Format method. Partial specializations can also override this formatting by implementing a partial // method that takes an object and optionally provides a string representation of it, in case a particular library wants to customize further. /// <summary>Provides logging facilities for System.Net libraries.</summary> internal sealed partial class NetEventSource : EventSource { #if !ES_BUILD_STANDALONE private const string EventSourceSuppressMessage = "Parameters to this method are primitive and are trimmer safe"; #endif /// <summary>The single event source instance to use for all logging.</summary> public static readonly NetEventSource Log = new NetEventSource(); #region Metadata public static class Keywords { public const EventKeywords Default = (EventKeywords)0x0001; public const EventKeywords Debug = (EventKeywords)0x0002; // No longer used: // EnterExit = (EventKeywords)0x0004; } private const string MissingMember = "(?)"; private const string NullInstance = "(null)"; private const string StaticMethodObject = "(static)"; private const string NoParameters = ""; private const int MaxDumpSize = 1024; // No longer used: // EnterEventId = 1; // ExitEventId = 2; private const int AssociateEventId = 3; private const int InfoEventId = 4; private const int ErrorEventId = 5; private const int VerboseEventId = 6; private const int DumpArrayEventId = 7; // These events are implemented in NetEventSource.Security.cs. 
// Define the ids here so that projects that include NetEventSource.Security.cs will not have conflicts. private const int EnumerateSecurityPackagesId = 8; private const int SspiPackageNotFoundId = 9; private const int AcquireDefaultCredentialId = 10; private const int AcquireCredentialsHandleId = 11; private const int InitializeSecurityContextId = 12; private const int SecurityContextInputBufferId = 13; private const int SecurityContextInputBuffersId = 14; private const int AcceptSecuritContextId = 15; private const int OperationReturnedSomethingId = 16; private const int NextAvailableEventId = 17; // Update this value whenever new events are added. Derived types should base all events off of this to avoid conflicts. #endregion #region Events #region Info /// <summary>Logs an information message.</summary> /// <param name="thisOrContextObject">`this`, or another object that serves to provide context for the operation.</param> /// <param name="formattableString">The message to be logged.</param> /// <param name="memberName">The calling member.</param> [NonEvent] public static void Info(object? thisOrContextObject, FormattableString? formattableString = null, [CallerMemberName] string? memberName = null) { DebugValidateArg(thisOrContextObject); DebugValidateArg(formattableString); if (Log.IsEnabled()) Log.Info(IdOf(thisOrContextObject), memberName, formattableString != null ? Format(formattableString) : NoParameters); } /// <summary>Logs an information message.</summary> /// <param name="thisOrContextObject">`this`, or another object that serves to provide context for the operation.</param> /// <param name="message">The message to be logged.</param> /// <param name="memberName">The calling member.</param> [NonEvent] public static void Info(object? thisOrContextObject, object? message, [CallerMemberName] string? memberName = null) { DebugValidateArg(thisOrContextObject); DebugValidateArg(message); if (Log.IsEnabled()) Log.Info(IdOf(thisOrContextObject), memberName, Format(message).ToString()); } [Event(InfoEventId, Level = EventLevel.Informational, Keywords = Keywords.Default)] private void Info(string thisOrContextObject, string? memberName, string? message) => WriteEvent(InfoEventId, thisOrContextObject, memberName ?? MissingMember, message); #endregion #region Error /// <summary>Logs an error message.</summary> /// <param name="thisOrContextObject">`this`, or another object that serves to provide context for the operation.</param> /// <param name="formattableString">The message to be logged.</param> /// <param name="memberName">The calling member.</param> [NonEvent] public static void Error(object? thisOrContextObject, FormattableString formattableString, [CallerMemberName] string? memberName = null) { DebugValidateArg(thisOrContextObject); DebugValidateArg(formattableString); if (Log.IsEnabled()) Log.ErrorMessage(IdOf(thisOrContextObject), memberName, Format(formattableString)); } /// <summary>Logs an error message.</summary> /// <param name="thisOrContextObject">`this`, or another object that serves to provide context for the operation.</param> /// <param name="message">The message to be logged.</param> /// <param name="memberName">The calling member.</param> [NonEvent] public static void Error(object? thisOrContextObject, object message, [CallerMemberName] string? 
memberName = null) { DebugValidateArg(thisOrContextObject); DebugValidateArg(message); if (Log.IsEnabled()) Log.ErrorMessage(IdOf(thisOrContextObject), memberName, Format(message).ToString()); } [Event(ErrorEventId, Level = EventLevel.Error, Keywords = Keywords.Default)] private void ErrorMessage(string thisOrContextObject, string? memberName, string? message) => WriteEvent(ErrorEventId, thisOrContextObject, memberName ?? MissingMember, message); #endregion #region Verbose /// <summary>Logs an info message at verbose mode.</summary> /// <param name="thisOrContextObject">`this`, or another object that serves to provide context for the operation.</param> /// <param name="formattableString">The message to be logged.</param> /// <param name="memberName">The calling member.</param> [NonEvent] public static void Verbose(object? thisOrContextObject, FormattableString formattableString, [CallerMemberName] string? memberName = null) { DebugValidateArg(thisOrContextObject); DebugValidateArg(formattableString); if (Log.IsEnabled()) Log.VerboseMessage(IdOf(thisOrContextObject), memberName, Format(formattableString)); } /// <summary>Logs an info at verbose mode.</summary> /// <param name="thisOrContextObject">`this`, or another object that serves to provide context for the operation.</param> /// <param name="message">The message to be logged.</param> /// <param name="memberName">The calling member.</param> [NonEvent] public static void Verbose(object? thisOrContextObject, object message, [CallerMemberName] string? memberName = null) { DebugValidateArg(thisOrContextObject); DebugValidateArg(message); if (Log.IsEnabled()) Log.VerboseMessage(IdOf(thisOrContextObject), memberName, Format(message).ToString()); } [Event(VerboseEventId, Level = EventLevel.Verbose, Keywords = Keywords.Default)] private void VerboseMessage(string thisOrContextObject, string? memberName, string? message) => WriteEvent(VerboseEventId, thisOrContextObject, memberName ?? MissingMember, message); #endregion #region DumpBuffer /// <summary>Logs the contents of a buffer.</summary> /// <param name="thisOrContextObject">`this`, or another object that serves to provide context for the operation.</param> /// <param name="buffer">The buffer to be logged.</param> /// <param name="memberName">The calling member.</param> [NonEvent] public static void DumpBuffer(object? thisOrContextObject, byte[] buffer, [CallerMemberName] string? memberName = null) { DumpBuffer(thisOrContextObject, buffer, 0, buffer.Length, memberName); } /// <summary>Logs the contents of a buffer.</summary> /// <param name="thisOrContextObject">`this`, or another object that serves to provide context for the operation.</param> /// <param name="buffer">The buffer to be logged.</param> /// <param name="offset">The starting offset from which to log.</param> /// <param name="count">The number of bytes to log.</param> /// <param name="memberName">The calling member.</param> [NonEvent] public static void DumpBuffer(object? thisOrContextObject, byte[] buffer, int offset, int count, [CallerMemberName] string? 
memberName = null) { if (Log.IsEnabled() && offset >= 0 && offset <= buffer.Length - count) { count = Math.Min(count, MaxDumpSize); byte[] slice = buffer; if (offset != 0 || count != buffer.Length) { slice = new byte[count]; Buffer.BlockCopy(buffer, offset, slice, 0, count); } Log.DumpBuffer(IdOf(thisOrContextObject), memberName, slice); } } /// <summary>Logs the contents of a buffer.</summary> /// <param name="thisOrContextObject">`this`, or another object that serves to provide context for the operation.</param> /// <param name="bufferPtr">The starting location of the buffer to be logged.</param> /// <param name="count">The number of bytes to log.</param> /// <param name="memberName">The calling member.</param> [NonEvent] public static unsafe void DumpBuffer(object? thisOrContextObject, IntPtr bufferPtr, int count, [CallerMemberName] string? memberName = null) { Debug.Assert(bufferPtr != IntPtr.Zero); Debug.Assert(count >= 0); if (Log.IsEnabled()) { var buffer = new byte[Math.Min(count, MaxDumpSize)]; fixed (byte* targetPtr = buffer) { Buffer.MemoryCopy((byte*)bufferPtr, targetPtr, buffer.Length, buffer.Length); } Log.DumpBuffer(IdOf(thisOrContextObject), memberName, buffer); } } [Event(DumpArrayEventId, Level = EventLevel.Verbose, Keywords = Keywords.Debug)] private void DumpBuffer(string thisOrContextObject, string? memberName, byte[] buffer) => WriteEvent(DumpArrayEventId, thisOrContextObject, memberName ?? MissingMember, buffer); #endregion #region Associate /// <summary>Logs a relationship between two objects.</summary> /// <param name="first">The first object.</param> /// <param name="second">The second object.</param> /// <param name="memberName">The calling member.</param> [NonEvent] public static void Associate(object first, object second, [CallerMemberName] string? memberName = null) { DebugValidateArg(first); DebugValidateArg(second); if (Log.IsEnabled()) Log.Associate(IdOf(first), memberName, IdOf(first), IdOf(second)); } /// <summary>Logs a relationship between two objects.</summary> /// <param name="thisOrContextObject">`this`, or another object that serves to provide context for the operation.</param> /// <param name="first">The first object.</param> /// <param name="second">The second object.</param> /// <param name="memberName">The calling member.</param> [NonEvent] public static void Associate(object? thisOrContextObject, object first, object second, [CallerMemberName] string? memberName = null) { DebugValidateArg(thisOrContextObject); DebugValidateArg(first); DebugValidateArg(second); if (Log.IsEnabled()) Log.Associate(IdOf(thisOrContextObject), memberName, IdOf(first), IdOf(second)); } [Event(AssociateEventId, Level = EventLevel.Informational, Keywords = Keywords.Default, Message = "[{2}]<-->[{3}]")] private void Associate(string thisOrContextObject, string? memberName, string first, string second) => WriteEvent(AssociateEventId, thisOrContextObject, memberName ?? MissingMember, first, second); #endregion #endregion #region Helpers [Conditional("DEBUG_NETEVENTSOURCE_MISUSE")] private static void DebugValidateArg(object? arg) { if (!Log.IsEnabled()) { Debug.Assert(!(arg is ValueType), $"Should not be passing value type {arg?.GetType()} to logging without IsEnabled check"); Debug.Assert(!(arg is FormattableString), $"Should not be formatting FormattableString \"{arg}\" if tracing isn't enabled"); } } [Conditional("DEBUG_NETEVENTSOURCE_MISUSE")] private static void DebugValidateArg(FormattableString? 
arg) { Debug.Assert(Log.IsEnabled() || arg == null, $"Should not be formatting FormattableString \"{arg}\" if tracing isn't enabled"); } [NonEvent] public static string IdOf(object? value) => value != null ? value.GetType().Name + "#" + GetHashCode(value) : NullInstance; [NonEvent] public static int GetHashCode(object? value) => value?.GetHashCode() ?? 0; [NonEvent] public static object Format(object? value) { // If it's null, return a known string for null values if (value == null) { return NullInstance; } // Give another partial implementation a chance to provide its own string representation string? result = null; AdditionalCustomizedToString(value, ref result); if (result != null) { return result; } // Format arrays with their element type name and length if (value is Array arr) { return $"{arr.GetType().GetElementType()}[{((Array)value).Length}]"; } // Format ICollections as the name and count if (value is ICollection c) { return $"{c.GetType().Name}({c.Count})"; } // Format SafeHandles as their type, hash code, and pointer value if (value is SafeHandle handle) { return $"{handle.GetType().Name}:{handle.GetHashCode()}(0x{handle.DangerousGetHandle():X})"; } // Format IntPtrs as hex if (value is IntPtr) { return $"0x{value:X}"; } // If the string representation of the instance would just be its type name, // use its id instead. string? toString = value.ToString(); if (toString == null || toString == value.GetType().FullName) { return IdOf(value); } // Otherwise, return the original object so that the caller does default formatting. return value; } [NonEvent] private static string Format(FormattableString s) { switch (s.ArgumentCount) { case 0: return s.Format; case 1: return string.Format(s.Format, Format(s.GetArgument(0))); case 2: return string.Format(s.Format, Format(s.GetArgument(0)), Format(s.GetArgument(1))); case 3: return string.Format(s.Format, Format(s.GetArgument(0)), Format(s.GetArgument(1)), Format(s.GetArgument(2))); default: object?[] args = s.GetArguments(); object[] formattedArgs = new object[args.Length]; for (int i = 0; i < args.Length; i++) { formattedArgs[i] = Format(args[i]); } return string.Format(s.Format, formattedArgs); } } static partial void AdditionalCustomizedToString<T>(T value, ref string? result); #endregion #region Custom WriteEvent overloads #if !ES_BUILD_STANDALONE [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2026:UnrecognizedReflectionPattern", Justification = EventSourceSuppressMessage)] #endif [NonEvent] private unsafe void WriteEvent(int eventId, string? arg1, string? arg2, string? arg3, string? 
arg4) { if (Log.IsEnabled()) { if (arg1 == null) arg1 = ""; if (arg2 == null) arg2 = ""; if (arg3 == null) arg3 = ""; if (arg4 == null) arg4 = ""; fixed (char* string1Bytes = arg1) fixed (char* string2Bytes = arg2) fixed (char* string3Bytes = arg3) fixed (char* string4Bytes = arg4) { const int NumEventDatas = 4; var descrs = stackalloc EventData[NumEventDatas]; descrs[0] = new EventData { DataPointer = (IntPtr)string1Bytes, Size = ((arg1.Length + 1) * 2) }; descrs[1] = new EventData { DataPointer = (IntPtr)string2Bytes, Size = ((arg2.Length + 1) * 2) }; descrs[2] = new EventData { DataPointer = (IntPtr)string3Bytes, Size = ((arg3.Length + 1) * 2) }; descrs[3] = new EventData { DataPointer = (IntPtr)string4Bytes, Size = ((arg4.Length + 1) * 2) }; WriteEventCore(eventId, NumEventDatas, descrs); } } } #if !ES_BUILD_STANDALONE [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2026:UnrecognizedReflectionPattern", Justification = EventSourceSuppressMessage)] #endif [NonEvent] private unsafe void WriteEvent(int eventId, string? arg1, string? arg2, byte[]? arg3) { if (Log.IsEnabled()) { if (arg1 == null) arg1 = ""; if (arg2 == null) arg2 = ""; if (arg3 == null) arg3 = Array.Empty<byte>(); fixed (char* arg1Ptr = arg1) fixed (char* arg2Ptr = arg2) fixed (byte* arg3Ptr = arg3) { int bufferLength = arg3.Length; const int NumEventDatas = 4; var descrs = stackalloc EventData[NumEventDatas]; descrs[0] = new EventData { DataPointer = (IntPtr)arg1Ptr, Size = (arg1.Length + 1) * sizeof(char) }; descrs[1] = new EventData { DataPointer = (IntPtr)arg2Ptr, Size = (arg2.Length + 1) * sizeof(char) }; descrs[2] = new EventData { DataPointer = (IntPtr)(&bufferLength), Size = 4 }; descrs[3] = new EventData { DataPointer = (IntPtr)arg3Ptr, Size = bufferLength }; WriteEventCore(eventId, NumEventDatas, descrs); } } } #if !ES_BUILD_STANDALONE [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2026:UnrecognizedReflectionPattern", Justification = EventSourceSuppressMessage)] #endif [NonEvent] private unsafe void WriteEvent(int eventId, string? arg1, int arg2, int arg3, int arg4) { if (Log.IsEnabled()) { if (arg1 == null) arg1 = ""; fixed (char* arg1Ptr = arg1) { const int NumEventDatas = 4; var descrs = stackalloc EventData[NumEventDatas]; descrs[0] = new EventData { DataPointer = (IntPtr)(arg1Ptr), Size = (arg1.Length + 1) * sizeof(char) }; descrs[1] = new EventData { DataPointer = (IntPtr)(&arg2), Size = sizeof(int) }; descrs[2] = new EventData { DataPointer = (IntPtr)(&arg3), Size = sizeof(int) }; descrs[3] = new EventData { DataPointer = (IntPtr)(&arg4), Size = sizeof(int) }; WriteEventCore(eventId, NumEventDatas, descrs); } } } #if !ES_BUILD_STANDALONE [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2026:UnrecognizedReflectionPattern", Justification = EventSourceSuppressMessage)] #endif [NonEvent] private unsafe void WriteEvent(int eventId, string? arg1, int arg2, string? 
arg3) { if (Log.IsEnabled()) { if (arg1 == null) arg1 = ""; if (arg3 == null) arg3 = ""; fixed (char* arg1Ptr = arg1) fixed (char* arg3Ptr = arg3) { const int NumEventDatas = 3; var descrs = stackalloc EventData[NumEventDatas]; descrs[0] = new EventData { DataPointer = (IntPtr)(arg1Ptr), Size = (arg1.Length + 1) * sizeof(char) }; descrs[1] = new EventData { DataPointer = (IntPtr)(&arg2), Size = sizeof(int) }; descrs[2] = new EventData { DataPointer = (IntPtr)(arg3Ptr), Size = (arg3.Length + 1) * sizeof(char) }; WriteEventCore(eventId, NumEventDatas, descrs); } } } #if !ES_BUILD_STANDALONE [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2026:UnrecognizedReflectionPattern", Justification = EventSourceSuppressMessage)] #endif [NonEvent] private unsafe void WriteEvent(int eventId, string? arg1, string? arg2, int arg3) { if (Log.IsEnabled()) { if (arg1 == null) arg1 = ""; if (arg2 == null) arg2 = ""; fixed (char* arg1Ptr = arg1) fixed (char* arg2Ptr = arg2) { const int NumEventDatas = 3; var descrs = stackalloc EventData[NumEventDatas]; descrs[0] = new EventData { DataPointer = (IntPtr)(arg1Ptr), Size = (arg1.Length + 1) * sizeof(char) }; descrs[1] = new EventData { DataPointer = (IntPtr)(arg2Ptr), Size = (arg2.Length + 1) * sizeof(char) }; descrs[2] = new EventData { DataPointer = (IntPtr)(&arg3), Size = sizeof(int) }; WriteEventCore(eventId, NumEventDatas, descrs); } } } #if !ES_BUILD_STANDALONE [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2026:UnrecognizedReflectionPattern", Justification = EventSourceSuppressMessage)] #endif [NonEvent] private unsafe void WriteEvent(int eventId, string? arg1, string? arg2, string? arg3, int arg4) { if (Log.IsEnabled()) { if (arg1 == null) arg1 = ""; if (arg2 == null) arg2 = ""; if (arg3 == null) arg3 = ""; fixed (char* arg1Ptr = arg1) fixed (char* arg2Ptr = arg2) fixed (char* arg3Ptr = arg3) { const int NumEventDatas = 4; var descrs = stackalloc EventData[NumEventDatas]; descrs[0] = new EventData { DataPointer = (IntPtr)(arg1Ptr), Size = (arg1.Length + 1) * sizeof(char) }; descrs[1] = new EventData { DataPointer = (IntPtr)(arg2Ptr), Size = (arg2.Length + 1) * sizeof(char) }; descrs[2] = new EventData { DataPointer = (IntPtr)(arg3Ptr), Size = (arg3.Length + 1) * sizeof(char) }; descrs[3] = new EventData { DataPointer = (IntPtr)(&arg4), Size = sizeof(int) }; WriteEventCore(eventId, NumEventDatas, descrs); } } } #if !ES_BUILD_STANDALONE [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2026:UnrecognizedReflectionPattern", Justification = EventSourceSuppressMessage)] #endif [NonEvent] private unsafe void WriteEvent(int eventId, string arg1, int arg2, int arg3, int arg4, int arg5, int arg6, int arg7, int arg8) { if (Log.IsEnabled()) { if (arg1 == null) arg1 = ""; fixed (char* arg1Ptr = arg1) { const int NumEventDatas = 8; var descrs = stackalloc EventData[NumEventDatas]; descrs[0] = new EventData { DataPointer = (IntPtr)(arg1Ptr), Size = (arg1.Length + 1) * sizeof(char) }; descrs[1] = new EventData { DataPointer = (IntPtr)(&arg2), Size = sizeof(int) }; descrs[2] = new EventData { DataPointer = (IntPtr)(&arg3), Size = sizeof(int) }; descrs[3] = new EventData { DataPointer = (IntPtr)(&arg4), Size = sizeof(int) }; descrs[4] = new EventData { DataPointer = (IntPtr)(&arg5), Size = sizeof(int) }; descrs[5] = new EventData { DataPointer = (IntPtr)(&arg6), Size = sizeof(int) }; descrs[6] = new EventData { DataPointer = (IntPtr)(&arg7), Size = sizeof(int) }; descrs[7] = new EventData { DataPointer = (IntPtr)(&arg8), Size = sizeof(int) }; 
WriteEventCore(eventId, NumEventDatas, descrs); } } } #if !ES_BUILD_STANDALONE [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2026:UnrecognizedReflectionPattern", Justification = EventSourceSuppressMessage)] #endif [NonEvent] private unsafe void WriteEvent(int eventId, string arg1, string arg2, int arg3, int arg4, int arg5) { if (Log.IsEnabled()) { if (arg1 == null) arg1 = ""; if (arg2 == null) arg2 = ""; fixed (char* arg1Ptr = arg1) fixed (char* arg2Ptr = arg2) { const int NumEventDatas = 5; var descrs = stackalloc EventData[NumEventDatas]; descrs[0] = new EventData { DataPointer = (IntPtr)(arg1Ptr), Size = (arg1.Length + 1) * sizeof(char) }; descrs[1] = new EventData { DataPointer = (IntPtr)(arg2Ptr), Size = (arg2.Length + 1) * sizeof(char) }; descrs[2] = new EventData { DataPointer = (IntPtr)(&arg3), Size = sizeof(int) }; descrs[3] = new EventData { DataPointer = (IntPtr)(&arg4), Size = sizeof(int) }; descrs[4] = new EventData { DataPointer = (IntPtr)(&arg5), Size = sizeof(int) }; WriteEventCore(eventId, NumEventDatas, descrs); } } } #endregion } }
label: -1
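Since before_content and after_content hold entire source files, a quick way to check what (if anything) changed for a row like the one above is a unified diff. This is a small sketch using Python's standard difflib; the two short placeholder strings stand in for a row's actual before_content and after_content values.

import difflib

before_content = "line a\nline b\n"  # placeholder for row["before_content"]
after_content = "line a\nline c\n"   # placeholder for row["after_content"]

diff = difflib.unified_diff(
    before_content.splitlines(keepends=True),
    after_content.splitlines(keepends=True),
    fromfile="before",
    tofile="after",
)
print("".join(diff) or "no change to this file")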
repo_name: dotnet/runtime
pr_number: 66,268
pr_title: [mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
pr_description: …nvert them to TO_I4/TO_I8 in the front end.
author: vargaz
date_created: 2022-03-06T20:28:39Z
date_merged: 2022-03-08T15:18:15Z
previous_commit: f396c3496a905451bcb4649c44c6d2e627690d05
pr_commit: 3959a4a9beeb292816008309e12b6d7150c05235
query: [mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
filepath: ./src/libraries/System.Private.Xml/tests/Xslt/TestFiles/TestData/XsltApiV2/AddParameterFA3.xsl
before_content:
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
  <xsl:output method="xml" omit-xml-declaration="yes" />
  <xsl:variable name="param1" select="'default global'"/>
  <xsl:template match="/">
    <xsl:call-template name="Test" />
  </xsl:template>
  <xsl:template name="Test">
    <result><xsl:value-of select="$param1" /></result>
  </xsl:template>
</xsl:stylesheet>
after_content:
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
  <xsl:output method="xml" omit-xml-declaration="yes" />
  <xsl:variable name="param1" select="'default global'"/>
  <xsl:template match="/">
    <xsl:call-template name="Test" />
  </xsl:template>
  <xsl:template name="Test">
    <result><xsl:value-of select="$param1" /></result>
  </xsl:template>
</xsl:stylesheet>
label: -1
repo_name: dotnet/runtime
pr_number: 66,268
pr_title: [mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
pr_description: …nvert them to TO_I4/TO_I8 in the front end.
author: vargaz
date_created: 2022-03-06T20:28:39Z
date_merged: 2022-03-08T15:18:15Z
previous_commit: f396c3496a905451bcb4649c44c6d2e627690d05
pr_commit: 3959a4a9beeb292816008309e12b6d7150c05235
query: [mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
filepath: ./src/tests/JIT/IL_Conformance/Old/Conformance_Base/Conv_I8.ilproj
before_content:
<Project Sdk="Microsoft.NET.Sdk.IL">
  <PropertyGroup>
    <OutputType>Exe</OutputType>
    <RestorePackages>true</RestorePackages>
    <CLRTestPriority>1</CLRTestPriority>
  </PropertyGroup>
  <PropertyGroup>
    <DebugType>PdbOnly</DebugType>
    <Optimize>True</Optimize>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="Conv_I8-64.il" />
  </ItemGroup>
</Project>
after_content:
<Project Sdk="Microsoft.NET.Sdk.IL">
  <PropertyGroup>
    <OutputType>Exe</OutputType>
    <RestorePackages>true</RestorePackages>
    <CLRTestPriority>1</CLRTestPriority>
  </PropertyGroup>
  <PropertyGroup>
    <DebugType>PdbOnly</DebugType>
    <Optimize>True</Optimize>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="Conv_I8-64.il" />
  </ItemGroup>
</Project>
label: -1
repo_name: dotnet/runtime
pr_number: 66,268
pr_title: [mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
pr_description: …nvert them to TO_I4/TO_I8 in the front end.
author: vargaz
date_created: 2022-03-06T20:28:39Z
date_merged: 2022-03-08T15:18:15Z
previous_commit: f396c3496a905451bcb4649c44c6d2e627690d05
pr_commit: 3959a4a9beeb292816008309e12b6d7150c05235
query: [mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
filepath: ./src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/Compilation.cs
before_content:
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Collections.Immutable; using System.IO; using System.Runtime.InteropServices; using ILCompiler.DependencyAnalysis; using ILCompiler.DependencyAnalysisFramework; using Internal.IL; using Internal.IL.Stubs; using Internal.TypeSystem; using Internal.TypeSystem.Ecma; using CORINFO_DEVIRTUALIZATION_DETAIL = Internal.JitInterface.CORINFO_DEVIRTUALIZATION_DETAIL; using Debug = System.Diagnostics.Debug; namespace ILCompiler { public abstract class Compilation : ICompilation { protected readonly DependencyAnalyzerBase<NodeFactory> _dependencyGraph; protected readonly NodeFactory _nodeFactory; protected readonly Logger _logger; protected readonly DebugInformationProvider _debugInformationProvider; protected readonly DevirtualizationManager _devirtualizationManager; private readonly IInliningPolicy _inliningPolicy; public NameMangler NameMangler => _nodeFactory.NameMangler; public NodeFactory NodeFactory => _nodeFactory; public CompilerTypeSystemContext TypeSystemContext => NodeFactory.TypeSystemContext; public Logger Logger => _logger; public PInvokeILProvider PInvokeILProvider { get; } private readonly TypeGetTypeMethodThunkCache _typeGetTypeMethodThunks; private readonly AssemblyGetExecutingAssemblyMethodThunkCache _assemblyGetExecutingAssemblyMethodThunks; private readonly MethodBaseGetCurrentMethodThunkCache _methodBaseGetCurrentMethodThunks; protected Compilation( DependencyAnalyzerBase<NodeFactory> dependencyGraph, NodeFactory nodeFactory, IEnumerable<ICompilationRootProvider> compilationRoots, ILProvider ilProvider, DebugInformationProvider debugInformationProvider, DevirtualizationManager devirtualizationManager, IInliningPolicy inliningPolicy, Logger logger) { _dependencyGraph = dependencyGraph; _nodeFactory = nodeFactory; _logger = logger; _debugInformationProvider = debugInformationProvider; _devirtualizationManager = devirtualizationManager; _inliningPolicy = inliningPolicy; _dependencyGraph.ComputeDependencyRoutine += ComputeDependencyNodeDependencies; NodeFactory.AttachToDependencyGraph(_dependencyGraph); var rootingService = new RootingServiceProvider(nodeFactory, _dependencyGraph.AddRoot); foreach (var rootProvider in compilationRoots) rootProvider.AddCompilationRoots(rootingService); MetadataType globalModuleGeneratedType = nodeFactory.TypeSystemContext.GeneratedAssembly.GetGlobalModuleType(); _typeGetTypeMethodThunks = new TypeGetTypeMethodThunkCache(globalModuleGeneratedType); _assemblyGetExecutingAssemblyMethodThunks = new AssemblyGetExecutingAssemblyMethodThunkCache(globalModuleGeneratedType); _methodBaseGetCurrentMethodThunks = new MethodBaseGetCurrentMethodThunkCache(); PInvokeILProvider = _nodeFactory.InteropStubManager.CreatePInvokeILProvider(); if (PInvokeILProvider != null) { ilProvider = new CombinedILProvider(ilProvider, PInvokeILProvider); } _methodILCache = new ILCache(ilProvider); } private ILCache _methodILCache; public virtual MethodIL GetMethodIL(MethodDesc method) { // Flush the cache when it grows too big if (_methodILCache.Count > 1000) _methodILCache = new ILCache(_methodILCache.ILProvider); return _methodILCache.GetOrCreateValue(method).MethodIL; } protected abstract void ComputeDependencyNodeDependencies(List<DependencyNodeCore<NodeFactory>> obj); protected abstract void CompileInternal(string outputFile, ObjectDumper dumper); public void 
DetectGenericCycles(MethodDesc caller, MethodDesc callee) { _nodeFactory.TypeSystemContext.DetectGenericCycles(caller, callee); } public virtual IEETypeNode NecessaryTypeSymbolIfPossible(TypeDesc type) { return _nodeFactory.NecessaryTypeSymbol(type); } public bool CanInline(MethodDesc caller, MethodDesc callee) { return _inliningPolicy.CanInline(caller, callee); } public bool CanConstructType(TypeDesc type) { return _devirtualizationManager.CanConstructType(type); } public DelegateCreationInfo GetDelegateCtor(TypeDesc delegateType, MethodDesc target, bool followVirtualDispatch) { // If we're creating a delegate to a virtual method that cannot be overriden, devirtualize. // This is not just an optimization - it's required for correctness in the presence of sealed // vtable slots. if (followVirtualDispatch && (target.IsFinal || target.OwningType.IsSealed())) followVirtualDispatch = false; if (followVirtualDispatch) target = MetadataVirtualMethodAlgorithm.FindSlotDefiningMethodForVirtualMethod(target); return DelegateCreationInfo.Create(delegateType, target, NodeFactory, followVirtualDispatch); } /// <summary> /// Gets an object representing the static data for RVA mapped fields from the PE image. /// </summary> public virtual ISymbolNode GetFieldRvaData(FieldDesc field) { if (field.GetType() == typeof(PInvokeLazyFixupField)) { return NodeFactory.PInvokeMethodFixup(new PInvokeMethodData((PInvokeLazyFixupField)field)); } else if (field is ExternSymbolMappedField externField) { return NodeFactory.ExternSymbol(externField.SymbolName); } else { // Use the typical field definition in case this is an instantiated generic type field = field.GetTypicalFieldDefinition(); int fieldTypePack = (field.FieldType as MetadataType)?.GetClassLayout().PackingSize ?? 1; return NodeFactory.ReadOnlyDataBlob(NameMangler.GetMangledFieldName(field), ((EcmaField)field).GetFieldRvaData(), Math.Max(NodeFactory.Target.PointerSize, fieldTypePack)); } } public bool HasLazyStaticConstructor(TypeDesc type) { return NodeFactory.PreinitializationManager.HasLazyStaticConstructor(type); } public MethodDebugInformation GetDebugInfo(MethodIL methodIL) { return _debugInformationProvider.GetDebugInfo(methodIL); } /// <summary> /// Resolves a reference to an intrinsic method to a new method that takes it's place in the compilation. /// This is used for intrinsics where the intrinsic expansion depends on the callsite. 
/// </summary> /// <param name="intrinsicMethod">The intrinsic method called.</param> /// <param name="callsiteMethod">The callsite that calls the intrinsic.</param> /// <returns>The intrinsic implementation to be called for this specific callsite.</returns> public MethodDesc ExpandIntrinsicForCallsite(MethodDesc intrinsicMethod, MethodDesc callsiteMethod) { Debug.Assert(intrinsicMethod.IsIntrinsic); var intrinsicOwningType = intrinsicMethod.OwningType as MetadataType; if (intrinsicOwningType == null) return intrinsicMethod; if (intrinsicOwningType.Module != TypeSystemContext.SystemModule) return intrinsicMethod; if (intrinsicOwningType.Name == "Type" && intrinsicOwningType.Namespace == "System") { if (intrinsicMethod.Signature.IsStatic && intrinsicMethod.Name == "GetType") { ModuleDesc callsiteModule = (callsiteMethod.OwningType as MetadataType)?.Module; if (callsiteModule != null) { Debug.Assert(callsiteModule is IAssemblyDesc, "Multi-module assemblies"); return _typeGetTypeMethodThunks.GetHelper(intrinsicMethod, ((IAssemblyDesc)callsiteModule).GetName().FullName); } } } else if (intrinsicOwningType.Name == "Assembly" && intrinsicOwningType.Namespace == "System.Reflection") { if (intrinsicMethod.Signature.IsStatic && intrinsicMethod.Name == "GetExecutingAssembly") { ModuleDesc callsiteModule = (callsiteMethod.OwningType as MetadataType)?.Module; if (callsiteModule != null) { Debug.Assert(callsiteModule is IAssemblyDesc, "Multi-module assemblies"); return _assemblyGetExecutingAssemblyMethodThunks.GetHelper((IAssemblyDesc)callsiteModule); } } } else if (intrinsicOwningType.Name == "MethodBase" && intrinsicOwningType.Namespace == "System.Reflection") { if (intrinsicMethod.Signature.IsStatic && intrinsicMethod.Name == "GetCurrentMethod") { return _methodBaseGetCurrentMethodThunks.GetHelper(callsiteMethod).InstantiateAsOpen(); } } return intrinsicMethod; } public bool HasFixedSlotVTable(TypeDesc type) { return NodeFactory.VTable(type).HasFixedSlots; } public bool IsEffectivelySealed(TypeDesc type) { return _devirtualizationManager.IsEffectivelySealed(type); } public bool IsEffectivelySealed(MethodDesc method) { return _devirtualizationManager.IsEffectivelySealed(method); } public MethodDesc ResolveVirtualMethod(MethodDesc declMethod, TypeDesc implType, out CORINFO_DEVIRTUALIZATION_DETAIL devirtualizationDetail) { return _devirtualizationManager.ResolveVirtualMethod(declMethod, implType, out devirtualizationDetail); } public bool NeedsRuntimeLookup(ReadyToRunHelperId lookupKind, object targetOfLookup) { switch (lookupKind) { case ReadyToRunHelperId.TypeHandle: case ReadyToRunHelperId.NecessaryTypeHandle: case ReadyToRunHelperId.DefaultConstructor: case ReadyToRunHelperId.TypeHandleForCasting: case ReadyToRunHelperId.ObjectAllocator: return ((TypeDesc)targetOfLookup).IsRuntimeDeterminedSubtype; case ReadyToRunHelperId.MethodDictionary: case ReadyToRunHelperId.MethodEntry: case ReadyToRunHelperId.VirtualDispatchCell: case ReadyToRunHelperId.MethodHandle: return ((MethodDesc)targetOfLookup).IsRuntimeDeterminedExactMethod; case ReadyToRunHelperId.FieldHandle: return ((FieldDesc)targetOfLookup).OwningType.IsRuntimeDeterminedSubtype; case ReadyToRunHelperId.ConstrainedDirectCall: return ((ConstrainedCallInfo)targetOfLookup).Method.IsRuntimeDeterminedExactMethod || ((ConstrainedCallInfo)targetOfLookup).ConstrainedType.IsRuntimeDeterminedSubtype; default: throw new NotImplementedException(); } } public ReadyToRunHelperId GetLdTokenHelperForType(TypeDesc type) { bool canConstructPerWholeProgramAnalysis 
= _devirtualizationManager == null ? true : _devirtualizationManager.CanConstructType(type); return canConstructPerWholeProgramAnalysis & DependencyAnalysis.ConstructedEETypeNode.CreationAllowed(type) ? ReadyToRunHelperId.TypeHandle : ReadyToRunHelperId.NecessaryTypeHandle; } public static MethodDesc GetConstructorForCreateInstanceIntrinsic(TypeDesc type) { MethodDesc ctor = type.GetDefaultConstructor(); if (ctor == null) { MetadataType activatorType = type.Context.SystemModule.GetKnownType("System", "Activator"); if (type.IsValueType && type.GetParameterlessConstructor() == null) { ctor = activatorType.GetKnownNestedType("StructWithNoConstructor").GetKnownMethod(".ctor", null); } else { ctor = activatorType.GetKnownMethod("MissingConstructorMethod", null); } } return ctor; } public ISymbolNode ComputeConstantLookup(ReadyToRunHelperId lookupKind, object targetOfLookup) { switch (lookupKind) { case ReadyToRunHelperId.TypeHandle: return NodeFactory.ConstructedTypeSymbol((TypeDesc)targetOfLookup); case ReadyToRunHelperId.NecessaryTypeHandle: return NecessaryTypeSymbolIfPossible((TypeDesc)targetOfLookup); case ReadyToRunHelperId.TypeHandleForCasting: { var type = (TypeDesc)targetOfLookup; if (type.IsNullable) targetOfLookup = type.Instantiation[0]; return NecessaryTypeSymbolIfPossible((TypeDesc)targetOfLookup); } case ReadyToRunHelperId.MethodDictionary: return NodeFactory.MethodGenericDictionary((MethodDesc)targetOfLookup); case ReadyToRunHelperId.MethodEntry: return NodeFactory.FatFunctionPointer((MethodDesc)targetOfLookup); case ReadyToRunHelperId.MethodHandle: return NodeFactory.RuntimeMethodHandle((MethodDesc)targetOfLookup); case ReadyToRunHelperId.FieldHandle: return NodeFactory.RuntimeFieldHandle((FieldDesc)targetOfLookup); case ReadyToRunHelperId.DefaultConstructor: { var type = (TypeDesc)targetOfLookup; MethodDesc ctor = GetConstructorForCreateInstanceIntrinsic(type); return NodeFactory.CanonicalEntrypoint(ctor); } case ReadyToRunHelperId.ObjectAllocator: { var type = (TypeDesc)targetOfLookup; return NodeFactory.ExternSymbol(JitHelper.GetNewObjectHelperForType(type)); } default: throw new NotImplementedException(); } } public GenericDictionaryLookup ComputeGenericLookup(MethodDesc contextMethod, ReadyToRunHelperId lookupKind, object targetOfLookup) { if (targetOfLookup is TypeSystemEntity typeSystemEntity) { _nodeFactory.TypeSystemContext.DetectGenericCycles(contextMethod, typeSystemEntity); } GenericContextSource contextSource; if (contextMethod.RequiresInstMethodDescArg()) { contextSource = GenericContextSource.MethodParameter; } else if (contextMethod.RequiresInstMethodTableArg()) { contextSource = GenericContextSource.TypeParameter; } else { Debug.Assert(contextMethod.AcquiresInstMethodTableFromThis()); contextSource = GenericContextSource.ThisObject; } // // Some helpers represent logical concepts that might not be something that can be looked up in a dictionary // // Downgrade type handle for casting to a normal type handle if possible if (lookupKind == ReadyToRunHelperId.TypeHandleForCasting) { var type = (TypeDesc)targetOfLookup; if (!type.IsRuntimeDeterminedType || (!((RuntimeDeterminedType)type).CanonicalType.IsCanonicalDefinitionType(CanonicalFormKind.Universal) && !((RuntimeDeterminedType)type).CanonicalType.IsNullable)) { if (type.IsNullable) { targetOfLookup = type.Instantiation[0]; } lookupKind = ReadyToRunHelperId.NecessaryTypeHandle; } } // We don't have separate entries for necessary type handles to avoid possible duplication if (lookupKind == 
ReadyToRunHelperId.NecessaryTypeHandle) { lookupKind = ReadyToRunHelperId.TypeHandle; } // Can we do a fixed lookup? Start by checking if we can get to the dictionary. // Context source having a vtable with fixed slots is a prerequisite. if (contextSource == GenericContextSource.MethodParameter || HasFixedSlotVTable(contextMethod.OwningType)) { DictionaryLayoutNode dictionaryLayout; if (contextSource == GenericContextSource.MethodParameter) dictionaryLayout = _nodeFactory.GenericDictionaryLayout(contextMethod); else dictionaryLayout = _nodeFactory.GenericDictionaryLayout(contextMethod.OwningType); // If the dictionary layout has fixed slots, we can compute the lookup now. Otherwise defer to helper. if (dictionaryLayout.HasFixedSlots) { int pointerSize = _nodeFactory.Target.PointerSize; GenericLookupResult lookup = ReadyToRunGenericHelperNode.GetLookupSignature(_nodeFactory, lookupKind, targetOfLookup); int dictionarySlot = dictionaryLayout.GetSlotForFixedEntry(lookup); if (dictionarySlot != -1) { int dictionaryOffset = dictionarySlot * pointerSize; bool indirectLastOffset = lookup.LookupResultReferenceType(_nodeFactory) == GenericLookupResultReferenceType.Indirect; if (contextSource == GenericContextSource.MethodParameter) { return GenericDictionaryLookup.CreateFixedLookup(contextSource, dictionaryOffset, indirectLastOffset: indirectLastOffset); } else { int vtableSlot = VirtualMethodSlotHelper.GetGenericDictionarySlot(_nodeFactory, contextMethod.OwningType); int vtableOffset = EETypeNode.GetVTableOffset(pointerSize) + vtableSlot * pointerSize; return GenericDictionaryLookup.CreateFixedLookup(contextSource, vtableOffset, dictionaryOffset, indirectLastOffset: indirectLastOffset); } } } } // Fixed lookup not possible - use helper. return GenericDictionaryLookup.CreateHelperLookup(contextSource, lookupKind, targetOfLookup); } public bool IsFatPointerCandidate(MethodDesc containingMethod, MethodSignature signature) { // Unmanaged calls are never fat pointers if ((signature.Flags & MethodSignatureFlags.UnmanagedCallingConventionMask) != 0) return false; if (containingMethod.OwningType is MetadataType owningType) { // RawCalliHelper is a way for the class library to opt out of fat calls if (owningType.Name == "RawCalliHelper") return false; // Delegate invocation never needs fat calls if (owningType.IsDelegate && containingMethod.Name == "Invoke") return false; } return true; } /// <summary> /// Retreives method whose runtime handle is suitable for use with GVMLookupForSlot. /// </summary> public MethodDesc GetTargetOfGenericVirtualMethodCall(MethodDesc calledMethod) { // Should be a generic virtual method Debug.Assert(calledMethod.HasInstantiation && calledMethod.IsVirtual); // Needs to be either a concrete method, or a runtime determined form. Debug.Assert(!calledMethod.IsCanonicalMethod(CanonicalFormKind.Specific)); MethodDesc targetMethod = calledMethod.GetCanonMethodTarget(CanonicalFormKind.Specific); MethodDesc targetMethodDefinition = targetMethod.GetMethodDefinition(); MethodDesc slotNormalizedMethodDefinition = MetadataVirtualMethodAlgorithm.FindSlotDefiningMethodForVirtualMethod(targetMethodDefinition); // If the method defines the slot, we can use that. 
if (slotNormalizedMethodDefinition == targetMethodDefinition) { return calledMethod; } // Normalize to the slot defining method MethodDesc slotNormalizedMethod = TypeSystemContext.GetInstantiatedMethod( slotNormalizedMethodDefinition, targetMethod.Instantiation); // Since the slot normalization logic modified what method we're looking at, we need to compute the new target of lookup. // // If we could use virtual method resolution logic with runtime determined methods, we wouldn't need what we're going // to do below. MethodDesc runtimeDeterminedSlotNormalizedMethod; if (!slotNormalizedMethod.OwningType.IsCanonicalSubtype(CanonicalFormKind.Any)) { // If the owning type is not generic, we can use it as-is, potentially only replacing the runtime-determined // method instantiation part. runtimeDeterminedSlotNormalizedMethod = slotNormalizedMethod.GetMethodDefinition(); } else { // If we need a runtime lookup but a normalization to the slot defining method happened above, we need to compute // the runtime lookup in terms of the base type that introduced the slot. // // To do that, we walk the base hierarchy of the runtime determined thing, looking for a type definition that matches // the slot-normalized virtual method. We then find the method on that type. TypeDesc runtimeDeterminedOwningType = calledMethod.OwningType; Debug.Assert(!runtimeDeterminedOwningType.IsInterface); while (!slotNormalizedMethod.OwningType.HasSameTypeDefinition(runtimeDeterminedOwningType)) { TypeDesc runtimeDeterminedBaseTypeDefinition = runtimeDeterminedOwningType.GetTypeDefinition().BaseType; if (runtimeDeterminedBaseTypeDefinition.HasInstantiation) { runtimeDeterminedOwningType = runtimeDeterminedBaseTypeDefinition.InstantiateSignature(runtimeDeterminedOwningType.Instantiation, default); } else { runtimeDeterminedOwningType = runtimeDeterminedBaseTypeDefinition; } } // Now get the method on the newly found type Debug.Assert(runtimeDeterminedOwningType.HasInstantiation); runtimeDeterminedSlotNormalizedMethod = TypeSystemContext.GetMethodForInstantiatedType( slotNormalizedMethod.GetTypicalMethodDefinition(), (InstantiatedType)runtimeDeterminedOwningType); } return TypeSystemContext.GetInstantiatedMethod(runtimeDeterminedSlotNormalizedMethod, calledMethod.Instantiation); } CompilationResults ICompilation.Compile(string outputFile, ObjectDumper dumper) { if (dumper != null) { dumper.Begin(); } CompileInternal(outputFile, dumper); if (dumper != null) { dumper.End(); } return new CompilationResults(_dependencyGraph, _nodeFactory); } private sealed class ILCache : LockFreeReaderHashtable<MethodDesc, ILCache.MethodILData> { public ILProvider ILProvider { get; } public ILCache(ILProvider provider) { ILProvider = provider; } protected override int GetKeyHashCode(MethodDesc key) { return key.GetHashCode(); } protected override int GetValueHashCode(MethodILData value) { return value.Method.GetHashCode(); } protected override bool CompareKeyToValue(MethodDesc key, MethodILData value) { return Object.ReferenceEquals(key, value.Method); } protected override bool CompareValueToValue(MethodILData value1, MethodILData value2) { return Object.ReferenceEquals(value1.Method, value2.Method); } protected override MethodILData CreateValueFromKey(MethodDesc key) { return new MethodILData() { Method = key, MethodIL = ILProvider.GetMethodIL(key) }; } internal class MethodILData { public MethodDesc Method; public MethodIL MethodIL; } } private sealed class CombinedILProvider : ILProvider { private readonly ILProvider _primaryILProvider; 
private readonly PInvokeILProvider _pinvokeProvider; public CombinedILProvider(ILProvider primaryILProvider, PInvokeILProvider pinvokeILProvider) { _primaryILProvider = primaryILProvider; _pinvokeProvider = pinvokeILProvider; } public override MethodIL GetMethodIL(MethodDesc method) { MethodIL result = _primaryILProvider.GetMethodIL(method); if (result == null && method.IsPInvoke) result = _pinvokeProvider.GetMethodIL(method); return result; } } } // Interface under which Compilation is exposed externally. public interface ICompilation { CompilationResults Compile(string outputFileName, ObjectDumper dumper); } public class CompilationResults { private readonly DependencyAnalyzerBase<NodeFactory> _graph; protected readonly NodeFactory _factory; protected ImmutableArray<DependencyNodeCore<NodeFactory>> MarkedNodes { get { return _graph.MarkedNodeList; } } internal CompilationResults(DependencyAnalyzerBase<NodeFactory> graph, NodeFactory factory) { _graph = graph; _factory = factory; } public void WriteDependencyLog(string fileName) { using (FileStream dgmlOutput = new FileStream(fileName, FileMode.Create)) { DgmlWriter.WriteDependencyGraphToStream(dgmlOutput, _graph, _factory); dgmlOutput.Flush(); } } public IEnumerable<MethodDesc> CompiledMethodBodies { get { foreach (var node in MarkedNodes) { if (node is IMethodBodyNode) yield return ((IMethodBodyNode)node).Method; } } } public IEnumerable<TypeDesc> ConstructedEETypes { get { foreach (var node in MarkedNodes) { if (node is ConstructedEETypeNode || node is CanonicalEETypeNode) { yield return ((IEETypeNode)node).Type; } } } } } public sealed class ConstrainedCallInfo { public readonly TypeDesc ConstrainedType; public readonly MethodDesc Method; public ConstrainedCallInfo(TypeDesc constrainedType, MethodDesc method) => (ConstrainedType, Method) = (constrainedType, method); } }
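For readers tracing ComputeGenericLookup in the Compilation class above, the fixed-slot case reduces to simple offset arithmetic: the dictionary slot found in the layout is scaled by the target pointer size, and when the generic context comes from the type (rather than a method parameter) an extra offset through the vtable-stored dictionary pointer is applied first. The C# sketch below restates only that arithmetic; the constant names and example slot numbers are hypothetical stand-ins, not the ILCompiler node factory API.

// Minimal sketch of the fixed-slot offsets computed in ComputeGenericLookup above.
// All names and constants are hypothetical; only the arithmetic mirrors the original.
static class FixedLookupSketch
{
    // Offset of a generic dictionary entry within the dictionary itself.
    public static int DictionaryEntryOffset(int dictionarySlot, int pointerSize)
        => dictionarySlot * pointerSize;

    // When the context is the type, the dictionary pointer lives in a vtable slot,
    // so a second offset (past a hypothetical MethodTable header) is needed to reach it.
    public static int VTableDictionaryOffset(int vtableSlot, int pointerSize, int vtableBaseOffset)
        => vtableBaseOffset + vtableSlot * pointerSize;
}

// Example: on a 64-bit target (pointerSize == 8), dictionary slot 3 sits at byte offset 24,
// and a dictionary pointer in vtable slot 2 behind a hypothetical 16-byte header sits at offset 32.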
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Collections.Immutable; using System.IO; using System.Runtime.InteropServices; using ILCompiler.DependencyAnalysis; using ILCompiler.DependencyAnalysisFramework; using Internal.IL; using Internal.IL.Stubs; using Internal.TypeSystem; using Internal.TypeSystem.Ecma; using CORINFO_DEVIRTUALIZATION_DETAIL = Internal.JitInterface.CORINFO_DEVIRTUALIZATION_DETAIL; using Debug = System.Diagnostics.Debug; namespace ILCompiler { public abstract class Compilation : ICompilation { protected readonly DependencyAnalyzerBase<NodeFactory> _dependencyGraph; protected readonly NodeFactory _nodeFactory; protected readonly Logger _logger; protected readonly DebugInformationProvider _debugInformationProvider; protected readonly DevirtualizationManager _devirtualizationManager; private readonly IInliningPolicy _inliningPolicy; public NameMangler NameMangler => _nodeFactory.NameMangler; public NodeFactory NodeFactory => _nodeFactory; public CompilerTypeSystemContext TypeSystemContext => NodeFactory.TypeSystemContext; public Logger Logger => _logger; public PInvokeILProvider PInvokeILProvider { get; } private readonly TypeGetTypeMethodThunkCache _typeGetTypeMethodThunks; private readonly AssemblyGetExecutingAssemblyMethodThunkCache _assemblyGetExecutingAssemblyMethodThunks; private readonly MethodBaseGetCurrentMethodThunkCache _methodBaseGetCurrentMethodThunks; protected Compilation( DependencyAnalyzerBase<NodeFactory> dependencyGraph, NodeFactory nodeFactory, IEnumerable<ICompilationRootProvider> compilationRoots, ILProvider ilProvider, DebugInformationProvider debugInformationProvider, DevirtualizationManager devirtualizationManager, IInliningPolicy inliningPolicy, Logger logger) { _dependencyGraph = dependencyGraph; _nodeFactory = nodeFactory; _logger = logger; _debugInformationProvider = debugInformationProvider; _devirtualizationManager = devirtualizationManager; _inliningPolicy = inliningPolicy; _dependencyGraph.ComputeDependencyRoutine += ComputeDependencyNodeDependencies; NodeFactory.AttachToDependencyGraph(_dependencyGraph); var rootingService = new RootingServiceProvider(nodeFactory, _dependencyGraph.AddRoot); foreach (var rootProvider in compilationRoots) rootProvider.AddCompilationRoots(rootingService); MetadataType globalModuleGeneratedType = nodeFactory.TypeSystemContext.GeneratedAssembly.GetGlobalModuleType(); _typeGetTypeMethodThunks = new TypeGetTypeMethodThunkCache(globalModuleGeneratedType); _assemblyGetExecutingAssemblyMethodThunks = new AssemblyGetExecutingAssemblyMethodThunkCache(globalModuleGeneratedType); _methodBaseGetCurrentMethodThunks = new MethodBaseGetCurrentMethodThunkCache(); PInvokeILProvider = _nodeFactory.InteropStubManager.CreatePInvokeILProvider(); if (PInvokeILProvider != null) { ilProvider = new CombinedILProvider(ilProvider, PInvokeILProvider); } _methodILCache = new ILCache(ilProvider); } private ILCache _methodILCache; public virtual MethodIL GetMethodIL(MethodDesc method) { // Flush the cache when it grows too big if (_methodILCache.Count > 1000) _methodILCache = new ILCache(_methodILCache.ILProvider); return _methodILCache.GetOrCreateValue(method).MethodIL; } protected abstract void ComputeDependencyNodeDependencies(List<DependencyNodeCore<NodeFactory>> obj); protected abstract void CompileInternal(string outputFile, ObjectDumper dumper); public void 
DetectGenericCycles(MethodDesc caller, MethodDesc callee) { _nodeFactory.TypeSystemContext.DetectGenericCycles(caller, callee); } public virtual IEETypeNode NecessaryTypeSymbolIfPossible(TypeDesc type) { return _nodeFactory.NecessaryTypeSymbol(type); } public bool CanInline(MethodDesc caller, MethodDesc callee) { return _inliningPolicy.CanInline(caller, callee); } public bool CanConstructType(TypeDesc type) { return _devirtualizationManager.CanConstructType(type); } public DelegateCreationInfo GetDelegateCtor(TypeDesc delegateType, MethodDesc target, bool followVirtualDispatch) { // If we're creating a delegate to a virtual method that cannot be overriden, devirtualize. // This is not just an optimization - it's required for correctness in the presence of sealed // vtable slots. if (followVirtualDispatch && (target.IsFinal || target.OwningType.IsSealed())) followVirtualDispatch = false; if (followVirtualDispatch) target = MetadataVirtualMethodAlgorithm.FindSlotDefiningMethodForVirtualMethod(target); return DelegateCreationInfo.Create(delegateType, target, NodeFactory, followVirtualDispatch); } /// <summary> /// Gets an object representing the static data for RVA mapped fields from the PE image. /// </summary> public virtual ISymbolNode GetFieldRvaData(FieldDesc field) { if (field.GetType() == typeof(PInvokeLazyFixupField)) { return NodeFactory.PInvokeMethodFixup(new PInvokeMethodData((PInvokeLazyFixupField)field)); } else if (field is ExternSymbolMappedField externField) { return NodeFactory.ExternSymbol(externField.SymbolName); } else { // Use the typical field definition in case this is an instantiated generic type field = field.GetTypicalFieldDefinition(); int fieldTypePack = (field.FieldType as MetadataType)?.GetClassLayout().PackingSize ?? 1; return NodeFactory.ReadOnlyDataBlob(NameMangler.GetMangledFieldName(field), ((EcmaField)field).GetFieldRvaData(), Math.Max(NodeFactory.Target.PointerSize, fieldTypePack)); } } public bool HasLazyStaticConstructor(TypeDesc type) { return NodeFactory.PreinitializationManager.HasLazyStaticConstructor(type); } public MethodDebugInformation GetDebugInfo(MethodIL methodIL) { return _debugInformationProvider.GetDebugInfo(methodIL); } /// <summary> /// Resolves a reference to an intrinsic method to a new method that takes it's place in the compilation. /// This is used for intrinsics where the intrinsic expansion depends on the callsite. 
/// </summary> /// <param name="intrinsicMethod">The intrinsic method called.</param> /// <param name="callsiteMethod">The callsite that calls the intrinsic.</param> /// <returns>The intrinsic implementation to be called for this specific callsite.</returns> public MethodDesc ExpandIntrinsicForCallsite(MethodDesc intrinsicMethod, MethodDesc callsiteMethod) { Debug.Assert(intrinsicMethod.IsIntrinsic); var intrinsicOwningType = intrinsicMethod.OwningType as MetadataType; if (intrinsicOwningType == null) return intrinsicMethod; if (intrinsicOwningType.Module != TypeSystemContext.SystemModule) return intrinsicMethod; if (intrinsicOwningType.Name == "Type" && intrinsicOwningType.Namespace == "System") { if (intrinsicMethod.Signature.IsStatic && intrinsicMethod.Name == "GetType") { ModuleDesc callsiteModule = (callsiteMethod.OwningType as MetadataType)?.Module; if (callsiteModule != null) { Debug.Assert(callsiteModule is IAssemblyDesc, "Multi-module assemblies"); return _typeGetTypeMethodThunks.GetHelper(intrinsicMethod, ((IAssemblyDesc)callsiteModule).GetName().FullName); } } } else if (intrinsicOwningType.Name == "Assembly" && intrinsicOwningType.Namespace == "System.Reflection") { if (intrinsicMethod.Signature.IsStatic && intrinsicMethod.Name == "GetExecutingAssembly") { ModuleDesc callsiteModule = (callsiteMethod.OwningType as MetadataType)?.Module; if (callsiteModule != null) { Debug.Assert(callsiteModule is IAssemblyDesc, "Multi-module assemblies"); return _assemblyGetExecutingAssemblyMethodThunks.GetHelper((IAssemblyDesc)callsiteModule); } } } else if (intrinsicOwningType.Name == "MethodBase" && intrinsicOwningType.Namespace == "System.Reflection") { if (intrinsicMethod.Signature.IsStatic && intrinsicMethod.Name == "GetCurrentMethod") { return _methodBaseGetCurrentMethodThunks.GetHelper(callsiteMethod).InstantiateAsOpen(); } } return intrinsicMethod; } public bool HasFixedSlotVTable(TypeDesc type) { return NodeFactory.VTable(type).HasFixedSlots; } public bool IsEffectivelySealed(TypeDesc type) { return _devirtualizationManager.IsEffectivelySealed(type); } public bool IsEffectivelySealed(MethodDesc method) { return _devirtualizationManager.IsEffectivelySealed(method); } public MethodDesc ResolveVirtualMethod(MethodDesc declMethod, TypeDesc implType, out CORINFO_DEVIRTUALIZATION_DETAIL devirtualizationDetail) { return _devirtualizationManager.ResolveVirtualMethod(declMethod, implType, out devirtualizationDetail); } public bool NeedsRuntimeLookup(ReadyToRunHelperId lookupKind, object targetOfLookup) { switch (lookupKind) { case ReadyToRunHelperId.TypeHandle: case ReadyToRunHelperId.NecessaryTypeHandle: case ReadyToRunHelperId.DefaultConstructor: case ReadyToRunHelperId.TypeHandleForCasting: case ReadyToRunHelperId.ObjectAllocator: return ((TypeDesc)targetOfLookup).IsRuntimeDeterminedSubtype; case ReadyToRunHelperId.MethodDictionary: case ReadyToRunHelperId.MethodEntry: case ReadyToRunHelperId.VirtualDispatchCell: case ReadyToRunHelperId.MethodHandle: return ((MethodDesc)targetOfLookup).IsRuntimeDeterminedExactMethod; case ReadyToRunHelperId.FieldHandle: return ((FieldDesc)targetOfLookup).OwningType.IsRuntimeDeterminedSubtype; case ReadyToRunHelperId.ConstrainedDirectCall: return ((ConstrainedCallInfo)targetOfLookup).Method.IsRuntimeDeterminedExactMethod || ((ConstrainedCallInfo)targetOfLookup).ConstrainedType.IsRuntimeDeterminedSubtype; default: throw new NotImplementedException(); } } public ReadyToRunHelperId GetLdTokenHelperForType(TypeDesc type) { bool canConstructPerWholeProgramAnalysis 
= _devirtualizationManager == null ? true : _devirtualizationManager.CanConstructType(type); return canConstructPerWholeProgramAnalysis & DependencyAnalysis.ConstructedEETypeNode.CreationAllowed(type) ? ReadyToRunHelperId.TypeHandle : ReadyToRunHelperId.NecessaryTypeHandle; } public static MethodDesc GetConstructorForCreateInstanceIntrinsic(TypeDesc type) { MethodDesc ctor = type.GetDefaultConstructor(); if (ctor == null) { MetadataType activatorType = type.Context.SystemModule.GetKnownType("System", "Activator"); if (type.IsValueType && type.GetParameterlessConstructor() == null) { ctor = activatorType.GetKnownNestedType("StructWithNoConstructor").GetKnownMethod(".ctor", null); } else { ctor = activatorType.GetKnownMethod("MissingConstructorMethod", null); } } return ctor; } public ISymbolNode ComputeConstantLookup(ReadyToRunHelperId lookupKind, object targetOfLookup) { switch (lookupKind) { case ReadyToRunHelperId.TypeHandle: return NodeFactory.ConstructedTypeSymbol((TypeDesc)targetOfLookup); case ReadyToRunHelperId.NecessaryTypeHandle: return NecessaryTypeSymbolIfPossible((TypeDesc)targetOfLookup); case ReadyToRunHelperId.TypeHandleForCasting: { var type = (TypeDesc)targetOfLookup; if (type.IsNullable) targetOfLookup = type.Instantiation[0]; return NecessaryTypeSymbolIfPossible((TypeDesc)targetOfLookup); } case ReadyToRunHelperId.MethodDictionary: return NodeFactory.MethodGenericDictionary((MethodDesc)targetOfLookup); case ReadyToRunHelperId.MethodEntry: return NodeFactory.FatFunctionPointer((MethodDesc)targetOfLookup); case ReadyToRunHelperId.MethodHandle: return NodeFactory.RuntimeMethodHandle((MethodDesc)targetOfLookup); case ReadyToRunHelperId.FieldHandle: return NodeFactory.RuntimeFieldHandle((FieldDesc)targetOfLookup); case ReadyToRunHelperId.DefaultConstructor: { var type = (TypeDesc)targetOfLookup; MethodDesc ctor = GetConstructorForCreateInstanceIntrinsic(type); return NodeFactory.CanonicalEntrypoint(ctor); } case ReadyToRunHelperId.ObjectAllocator: { var type = (TypeDesc)targetOfLookup; return NodeFactory.ExternSymbol(JitHelper.GetNewObjectHelperForType(type)); } default: throw new NotImplementedException(); } } public GenericDictionaryLookup ComputeGenericLookup(MethodDesc contextMethod, ReadyToRunHelperId lookupKind, object targetOfLookup) { if (targetOfLookup is TypeSystemEntity typeSystemEntity) { _nodeFactory.TypeSystemContext.DetectGenericCycles(contextMethod, typeSystemEntity); } GenericContextSource contextSource; if (contextMethod.RequiresInstMethodDescArg()) { contextSource = GenericContextSource.MethodParameter; } else if (contextMethod.RequiresInstMethodTableArg()) { contextSource = GenericContextSource.TypeParameter; } else { Debug.Assert(contextMethod.AcquiresInstMethodTableFromThis()); contextSource = GenericContextSource.ThisObject; } // // Some helpers represent logical concepts that might not be something that can be looked up in a dictionary // // Downgrade type handle for casting to a normal type handle if possible if (lookupKind == ReadyToRunHelperId.TypeHandleForCasting) { var type = (TypeDesc)targetOfLookup; if (!type.IsRuntimeDeterminedType || (!((RuntimeDeterminedType)type).CanonicalType.IsCanonicalDefinitionType(CanonicalFormKind.Universal) && !((RuntimeDeterminedType)type).CanonicalType.IsNullable)) { if (type.IsNullable) { targetOfLookup = type.Instantiation[0]; } lookupKind = ReadyToRunHelperId.NecessaryTypeHandle; } } // We don't have separate entries for necessary type handles to avoid possible duplication if (lookupKind == 
ReadyToRunHelperId.NecessaryTypeHandle) { lookupKind = ReadyToRunHelperId.TypeHandle; } // Can we do a fixed lookup? Start by checking if we can get to the dictionary. // Context source having a vtable with fixed slots is a prerequisite. if (contextSource == GenericContextSource.MethodParameter || HasFixedSlotVTable(contextMethod.OwningType)) { DictionaryLayoutNode dictionaryLayout; if (contextSource == GenericContextSource.MethodParameter) dictionaryLayout = _nodeFactory.GenericDictionaryLayout(contextMethod); else dictionaryLayout = _nodeFactory.GenericDictionaryLayout(contextMethod.OwningType); // If the dictionary layout has fixed slots, we can compute the lookup now. Otherwise defer to helper. if (dictionaryLayout.HasFixedSlots) { int pointerSize = _nodeFactory.Target.PointerSize; GenericLookupResult lookup = ReadyToRunGenericHelperNode.GetLookupSignature(_nodeFactory, lookupKind, targetOfLookup); int dictionarySlot = dictionaryLayout.GetSlotForFixedEntry(lookup); if (dictionarySlot != -1) { int dictionaryOffset = dictionarySlot * pointerSize; bool indirectLastOffset = lookup.LookupResultReferenceType(_nodeFactory) == GenericLookupResultReferenceType.Indirect; if (contextSource == GenericContextSource.MethodParameter) { return GenericDictionaryLookup.CreateFixedLookup(contextSource, dictionaryOffset, indirectLastOffset: indirectLastOffset); } else { int vtableSlot = VirtualMethodSlotHelper.GetGenericDictionarySlot(_nodeFactory, contextMethod.OwningType); int vtableOffset = EETypeNode.GetVTableOffset(pointerSize) + vtableSlot * pointerSize; return GenericDictionaryLookup.CreateFixedLookup(contextSource, vtableOffset, dictionaryOffset, indirectLastOffset: indirectLastOffset); } } } } // Fixed lookup not possible - use helper. return GenericDictionaryLookup.CreateHelperLookup(contextSource, lookupKind, targetOfLookup); } public bool IsFatPointerCandidate(MethodDesc containingMethod, MethodSignature signature) { // Unmanaged calls are never fat pointers if ((signature.Flags & MethodSignatureFlags.UnmanagedCallingConventionMask) != 0) return false; if (containingMethod.OwningType is MetadataType owningType) { // RawCalliHelper is a way for the class library to opt out of fat calls if (owningType.Name == "RawCalliHelper") return false; // Delegate invocation never needs fat calls if (owningType.IsDelegate && containingMethod.Name == "Invoke") return false; } return true; } /// <summary> /// Retreives method whose runtime handle is suitable for use with GVMLookupForSlot. /// </summary> public MethodDesc GetTargetOfGenericVirtualMethodCall(MethodDesc calledMethod) { // Should be a generic virtual method Debug.Assert(calledMethod.HasInstantiation && calledMethod.IsVirtual); // Needs to be either a concrete method, or a runtime determined form. Debug.Assert(!calledMethod.IsCanonicalMethod(CanonicalFormKind.Specific)); MethodDesc targetMethod = calledMethod.GetCanonMethodTarget(CanonicalFormKind.Specific); MethodDesc targetMethodDefinition = targetMethod.GetMethodDefinition(); MethodDesc slotNormalizedMethodDefinition = MetadataVirtualMethodAlgorithm.FindSlotDefiningMethodForVirtualMethod(targetMethodDefinition); // If the method defines the slot, we can use that. 
if (slotNormalizedMethodDefinition == targetMethodDefinition) { return calledMethod; } // Normalize to the slot defining method MethodDesc slotNormalizedMethod = TypeSystemContext.GetInstantiatedMethod( slotNormalizedMethodDefinition, targetMethod.Instantiation); // Since the slot normalization logic modified what method we're looking at, we need to compute the new target of lookup. // // If we could use virtual method resolution logic with runtime determined methods, we wouldn't need what we're going // to do below. MethodDesc runtimeDeterminedSlotNormalizedMethod; if (!slotNormalizedMethod.OwningType.IsCanonicalSubtype(CanonicalFormKind.Any)) { // If the owning type is not generic, we can use it as-is, potentially only replacing the runtime-determined // method instantiation part. runtimeDeterminedSlotNormalizedMethod = slotNormalizedMethod.GetMethodDefinition(); } else { // If we need a runtime lookup but a normalization to the slot defining method happened above, we need to compute // the runtime lookup in terms of the base type that introduced the slot. // // To do that, we walk the base hierarchy of the runtime determined thing, looking for a type definition that matches // the slot-normalized virtual method. We then find the method on that type. TypeDesc runtimeDeterminedOwningType = calledMethod.OwningType; Debug.Assert(!runtimeDeterminedOwningType.IsInterface); while (!slotNormalizedMethod.OwningType.HasSameTypeDefinition(runtimeDeterminedOwningType)) { TypeDesc runtimeDeterminedBaseTypeDefinition = runtimeDeterminedOwningType.GetTypeDefinition().BaseType; if (runtimeDeterminedBaseTypeDefinition.HasInstantiation) { runtimeDeterminedOwningType = runtimeDeterminedBaseTypeDefinition.InstantiateSignature(runtimeDeterminedOwningType.Instantiation, default); } else { runtimeDeterminedOwningType = runtimeDeterminedBaseTypeDefinition; } } // Now get the method on the newly found type Debug.Assert(runtimeDeterminedOwningType.HasInstantiation); runtimeDeterminedSlotNormalizedMethod = TypeSystemContext.GetMethodForInstantiatedType( slotNormalizedMethod.GetTypicalMethodDefinition(), (InstantiatedType)runtimeDeterminedOwningType); } return TypeSystemContext.GetInstantiatedMethod(runtimeDeterminedSlotNormalizedMethod, calledMethod.Instantiation); } CompilationResults ICompilation.Compile(string outputFile, ObjectDumper dumper) { if (dumper != null) { dumper.Begin(); } CompileInternal(outputFile, dumper); if (dumper != null) { dumper.End(); } return new CompilationResults(_dependencyGraph, _nodeFactory); } private sealed class ILCache : LockFreeReaderHashtable<MethodDesc, ILCache.MethodILData> { public ILProvider ILProvider { get; } public ILCache(ILProvider provider) { ILProvider = provider; } protected override int GetKeyHashCode(MethodDesc key) { return key.GetHashCode(); } protected override int GetValueHashCode(MethodILData value) { return value.Method.GetHashCode(); } protected override bool CompareKeyToValue(MethodDesc key, MethodILData value) { return Object.ReferenceEquals(key, value.Method); } protected override bool CompareValueToValue(MethodILData value1, MethodILData value2) { return Object.ReferenceEquals(value1.Method, value2.Method); } protected override MethodILData CreateValueFromKey(MethodDesc key) { return new MethodILData() { Method = key, MethodIL = ILProvider.GetMethodIL(key) }; } internal class MethodILData { public MethodDesc Method; public MethodIL MethodIL; } } private sealed class CombinedILProvider : ILProvider { private readonly ILProvider _primaryILProvider; 
private readonly PInvokeILProvider _pinvokeProvider; public CombinedILProvider(ILProvider primaryILProvider, PInvokeILProvider pinvokeILProvider) { _primaryILProvider = primaryILProvider; _pinvokeProvider = pinvokeILProvider; } public override MethodIL GetMethodIL(MethodDesc method) { MethodIL result = _primaryILProvider.GetMethodIL(method); if (result == null && method.IsPInvoke) result = _pinvokeProvider.GetMethodIL(method); return result; } } } // Interface under which Compilation is exposed externally. public interface ICompilation { CompilationResults Compile(string outputFileName, ObjectDumper dumper); } public class CompilationResults { private readonly DependencyAnalyzerBase<NodeFactory> _graph; protected readonly NodeFactory _factory; protected ImmutableArray<DependencyNodeCore<NodeFactory>> MarkedNodes { get { return _graph.MarkedNodeList; } } internal CompilationResults(DependencyAnalyzerBase<NodeFactory> graph, NodeFactory factory) { _graph = graph; _factory = factory; } public void WriteDependencyLog(string fileName) { using (FileStream dgmlOutput = new FileStream(fileName, FileMode.Create)) { DgmlWriter.WriteDependencyGraphToStream(dgmlOutput, _graph, _factory); dgmlOutput.Flush(); } } public IEnumerable<MethodDesc> CompiledMethodBodies { get { foreach (var node in MarkedNodes) { if (node is IMethodBodyNode) yield return ((IMethodBodyNode)node).Method; } } } public IEnumerable<TypeDesc> ConstructedEETypes { get { foreach (var node in MarkedNodes) { if (node is ConstructedEETypeNode || node is CanonicalEETypeNode) { yield return ((IEETypeNode)node).Type; } } } } } public sealed class ConstrainedCallInfo { public readonly TypeDesc ConstrainedType; public readonly MethodDesc Method; public ConstrainedCallInfo(TypeDesc constrainedType, MethodDesc method) => (ConstrainedType, Method) = (constrainedType, method); } }
-1
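The Compilation class in the record above caches method IL in a LockFreeReaderHashtable and, rather than evicting individual entries, GetMethodIL simply discards the whole cache once it holds more than 1000 methods. The following is a minimal, standalone sketch of that flush-on-threshold pattern, using an ordinary Dictionary and a Func-based provider as illustrative stand-ins for the runtime's types:

using System;
using System.Collections.Generic;

// Sketch of the "flush the whole cache when it grows too big" idea from Compilation.GetMethodIL.
// Dictionary stands in for LockFreeReaderHashtable; Func<TKey, TValue> stands in for the IL provider.
sealed class FlushingCache<TKey, TValue> where TKey : notnull
{
    private readonly Func<TKey, TValue> _factory;
    private readonly int _limit;
    private Dictionary<TKey, TValue> _cache = new();

    public FlushingCache(Func<TKey, TValue> factory, int limit = 1000)
    {
        _factory = factory;
        _limit = limit;
    }

    public TValue GetOrCreate(TKey key)
    {
        // Start over with an empty table instead of evicting entries one by one.
        if (_cache.Count > _limit)
            _cache = new Dictionary<TKey, TValue>();

        if (_cache.TryGetValue(key, out var value))
            return value;

        value = _factory(key);
        _cache[key] = value;
        return value;
    }
}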
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
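The change summarized above moves the handling of pointer-sized float/double conversions out of the architecture back ends: the front end rewrites the generic TO_I form into TO_I4 or TO_I8 up front, depending on the target's pointer width. The sketch below illustrates only that lowering decision in C#; Mono's actual implementation is C and operates on its own IR, so the enum values and method here are invented for illustration:

// Hypothetical illustration of lowering a pointer-sized conversion opcode into an explicit
// 32- or 64-bit form based on target pointer size. Names are invented; this is not Mono IR.
enum ConvOp { FConvToI, FConvToI4, FConvToI8, RConvToI, RConvToI4, RConvToI8 }

static class ConvLowering
{
    public static ConvOp Lower(ConvOp op, int targetPointerSize)
    {
        bool is64Bit = targetPointerSize == 8;
        return op switch
        {
            ConvOp.FConvToI => is64Bit ? ConvOp.FConvToI8 : ConvOp.FConvToI4,
            ConvOp.RConvToI => is64Bit ? ConvOp.RConvToI8 : ConvOp.RConvToI4,
            _ => op, // already explicit; back ends never see the TO_I forms
        };
    }
}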
./src/tests/JIT/HardwareIntrinsics/X86/Sse41/TestZ.UInt32.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.X86; namespace JIT.HardwareIntrinsics.X86 { public static partial class Program { private static void TestZUInt32() { var test = new BooleanBinaryOpTest__TestZUInt32(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (Sse2.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); // Validates basic functionality works, using LoadAligned test.RunBasicScenario_LoadAligned(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (Sse2.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); // Validates calling via reflection works, using LoadAligned test.RunReflectionScenario_LoadAligned(); } // Validates passing a static member works test.RunClsVarScenario(); if (Sse2.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (Sse2.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); // Validates passing a local works, using LoadAligned test.RunLclVarScenario_LoadAligned(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (Sse2.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (Sse2.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (Sse2.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (Sse2.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class BooleanBinaryOpTest__TestZUInt32 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private GCHandle inHandle1; private GCHandle inHandle2; private ulong alignment; public DataTable(UInt32[] inArray1, UInt32[] inArray2, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<UInt32>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<UInt32>(); if ((alignment != 32 && 
alignment != 16) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<UInt32, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<UInt32, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<UInt32> _fld1; public Vector128<UInt32> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref testStruct._fld1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref testStruct._fld2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); return testStruct; } public void RunStructFldScenario(BooleanBinaryOpTest__TestZUInt32 testClass) { var result = Sse41.TestZ(_fld1, _fld2); testClass.ValidateResult(_fld1, _fld2, result); } public void RunStructFldScenario_Load(BooleanBinaryOpTest__TestZUInt32 testClass) { fixed (Vector128<UInt32>* pFld1 = &_fld1) fixed (Vector128<UInt32>* pFld2 = &_fld2) { var result = Sse41.TestZ( Sse2.LoadVector128((UInt32*)(pFld1)), Sse2.LoadVector128((UInt32*)(pFld2)) ); testClass.ValidateResult(_fld1, _fld2, result); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<UInt32>>() / sizeof(UInt32); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<UInt32>>() / sizeof(UInt32); private static UInt32[] _data1 = new UInt32[Op1ElementCount]; private static UInt32[] _data2 = new UInt32[Op2ElementCount]; private static Vector128<UInt32> _clsVar1; private static Vector128<UInt32> _clsVar2; private Vector128<UInt32> _fld1; private Vector128<UInt32> _fld2; private DataTable _dataTable; static BooleanBinaryOpTest__TestZUInt32() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _clsVar1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _clsVar2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); } public BooleanBinaryOpTest__TestZUInt32() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = 
TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _fld1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _fld2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } _dataTable = new DataTable(_data1, _data2, LargestVectorSize); } public bool IsSupported => Sse41.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Sse41.TestZ( Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray2Ptr) ); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, result); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = Sse41.TestZ( Sse2.LoadVector128((UInt32*)(_dataTable.inArray1Ptr)), Sse2.LoadVector128((UInt32*)(_dataTable.inArray2Ptr)) ); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, result); } public void RunBasicScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_LoadAligned)); var result = Sse41.TestZ( Sse2.LoadAlignedVector128((UInt32*)(_dataTable.inArray1Ptr)), Sse2.LoadAlignedVector128((UInt32*)(_dataTable.inArray2Ptr)) ); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, result); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Sse41).GetMethod(nameof(Sse41.TestZ), new Type[] { typeof(Vector128<UInt32>), typeof(Vector128<UInt32>) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray2Ptr) }); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, (bool)(result)); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(Sse41).GetMethod(nameof(Sse41.TestZ), new Type[] { typeof(Vector128<UInt32>), typeof(Vector128<UInt32>) }) .Invoke(null, new object[] { Sse2.LoadVector128((UInt32*)(_dataTable.inArray1Ptr)), Sse2.LoadVector128((UInt32*)(_dataTable.inArray2Ptr)) }); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, (bool)(result)); } public void RunReflectionScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_LoadAligned)); var result = typeof(Sse41).GetMethod(nameof(Sse41.TestZ), new Type[] { typeof(Vector128<UInt32>), typeof(Vector128<UInt32>) }) .Invoke(null, new object[] { Sse2.LoadAlignedVector128((UInt32*)(_dataTable.inArray1Ptr)), Sse2.LoadAlignedVector128((UInt32*)(_dataTable.inArray2Ptr)) }); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, (bool)(result)); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Sse41.TestZ( _clsVar1, _clsVar2 ); ValidateResult(_clsVar1, _clsVar2, result); } public void RunClsVarScenario_Load() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<UInt32>* pClsVar1 = &_clsVar1) fixed (Vector128<UInt32>* pClsVar2 = &_clsVar2) { var result = Sse41.TestZ( Sse2.LoadVector128((UInt32*)(pClsVar1)), Sse2.LoadVector128((UInt32*)(pClsVar2)) ); ValidateResult(_clsVar1, _clsVar2, result); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray2Ptr); var result = Sse41.TestZ(op1, op2); ValidateResult(op1, op2, result); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = Sse2.LoadVector128((UInt32*)(_dataTable.inArray1Ptr)); var op2 = Sse2.LoadVector128((UInt32*)(_dataTable.inArray2Ptr)); var result = Sse41.TestZ(op1, op2); ValidateResult(op1, op2, result); } public void RunLclVarScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_LoadAligned)); var op1 = Sse2.LoadAlignedVector128((UInt32*)(_dataTable.inArray1Ptr)); var op2 = Sse2.LoadAlignedVector128((UInt32*)(_dataTable.inArray2Ptr)); var result = Sse41.TestZ(op1, op2); ValidateResult(op1, op2, result); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new BooleanBinaryOpTest__TestZUInt32(); var result = Sse41.TestZ(test._fld1, test._fld2); ValidateResult(test._fld1, test._fld2, result); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new BooleanBinaryOpTest__TestZUInt32(); fixed (Vector128<UInt32>* pFld1 = &test._fld1) fixed (Vector128<UInt32>* pFld2 = &test._fld2) { var result = Sse41.TestZ( Sse2.LoadVector128((UInt32*)(pFld1)), Sse2.LoadVector128((UInt32*)(pFld2)) ); ValidateResult(test._fld1, test._fld2, result); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Sse41.TestZ(_fld1, _fld2); ValidateResult(_fld1, _fld2, result); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<UInt32>* pFld1 = &_fld1) fixed (Vector128<UInt32>* pFld2 = &_fld2) { var result = Sse41.TestZ( Sse2.LoadVector128((UInt32*)(pFld1)), Sse2.LoadVector128((UInt32*)(pFld2)) ); ValidateResult(_fld1, _fld2, result); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Sse41.TestZ(test._fld1, test._fld2); ValidateResult(test._fld1, test._fld2, result); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = Sse41.TestZ( Sse2.LoadVector128((UInt32*)(&test._fld1)), Sse2.LoadVector128((UInt32*)(&test._fld2)) ); ValidateResult(test._fld1, test._fld2, result); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<UInt32> op1, Vector128<UInt32> op2, bool result, [CallerMemberName] string method = "") { UInt32[] inArray1 = new UInt32[Op1ElementCount]; UInt32[] inArray2 = new UInt32[Op2ElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray2[0]), op2); ValidateResult(inArray1, inArray2, result, method); } private void ValidateResult(void* op1, void* op2, bool result, [CallerMemberName] string method = "") { UInt32[] inArray1 = new UInt32[Op1ElementCount]; UInt32[] inArray2 = new UInt32[Op2ElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); ValidateResult(inArray1, inArray2, result, method); } private void ValidateResult(UInt32[] left, UInt32[] right, bool result, [CallerMemberName] string method = "") { bool succeeded = true; var expectedResult = true; for (var i = 0; i < Op1ElementCount; i++) { expectedResult &= ((left[i] & right[i]) == 0); } succeeded = (expectedResult == result); if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Sse41)}.{nameof(Sse41.TestZ)}<UInt32>(Vector128<UInt32>, Vector128<UInt32>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({result})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
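The ValidateResult helper at the end of the test above pins down the semantics under test: Sse41.TestZ(left, right) should return true exactly when the bitwise AND of the two vectors is zero in every element. A standalone scalar restatement of that expected-result computation (the array contents below are arbitrary example data):

using System;

static class TestZReference
{
    // Scalar model of the property the test checks: true iff (left[i] & right[i]) == 0 for all i.
    public static bool TestZ(uint[] left, uint[] right)
    {
        bool allZero = true;
        for (int i = 0; i < left.Length; i++)
        {
            allZero &= (left[i] & right[i]) == 0;
        }
        return allZero;
    }

    public static void Main()
    {
        Console.WriteLine(TestZ(new uint[] { 0xF0, 0x0F }, new uint[] { 0x0F, 0xF0 })); // True
        Console.WriteLine(TestZ(new uint[] { 0xF0, 0x0F }, new uint[] { 0x10, 0x01 })); // False
    }
}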
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.X86; namespace JIT.HardwareIntrinsics.X86 { public static partial class Program { private static void TestZUInt32() { var test = new BooleanBinaryOpTest__TestZUInt32(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (Sse2.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); // Validates basic functionality works, using LoadAligned test.RunBasicScenario_LoadAligned(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (Sse2.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); // Validates calling via reflection works, using LoadAligned test.RunReflectionScenario_LoadAligned(); } // Validates passing a static member works test.RunClsVarScenario(); if (Sse2.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (Sse2.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); // Validates passing a local works, using LoadAligned test.RunLclVarScenario_LoadAligned(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (Sse2.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (Sse2.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (Sse2.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (Sse2.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class BooleanBinaryOpTest__TestZUInt32 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private GCHandle inHandle1; private GCHandle inHandle2; private ulong alignment; public DataTable(UInt32[] inArray1, UInt32[] inArray2, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<UInt32>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<UInt32>(); if ((alignment != 32 && 
alignment != 16) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<UInt32, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<UInt32, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<UInt32> _fld1; public Vector128<UInt32> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref testStruct._fld1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref testStruct._fld2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); return testStruct; } public void RunStructFldScenario(BooleanBinaryOpTest__TestZUInt32 testClass) { var result = Sse41.TestZ(_fld1, _fld2); testClass.ValidateResult(_fld1, _fld2, result); } public void RunStructFldScenario_Load(BooleanBinaryOpTest__TestZUInt32 testClass) { fixed (Vector128<UInt32>* pFld1 = &_fld1) fixed (Vector128<UInt32>* pFld2 = &_fld2) { var result = Sse41.TestZ( Sse2.LoadVector128((UInt32*)(pFld1)), Sse2.LoadVector128((UInt32*)(pFld2)) ); testClass.ValidateResult(_fld1, _fld2, result); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<UInt32>>() / sizeof(UInt32); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<UInt32>>() / sizeof(UInt32); private static UInt32[] _data1 = new UInt32[Op1ElementCount]; private static UInt32[] _data2 = new UInt32[Op2ElementCount]; private static Vector128<UInt32> _clsVar1; private static Vector128<UInt32> _clsVar2; private Vector128<UInt32> _fld1; private Vector128<UInt32> _fld2; private DataTable _dataTable; static BooleanBinaryOpTest__TestZUInt32() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _clsVar1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _clsVar2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); } public BooleanBinaryOpTest__TestZUInt32() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = 
TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _fld1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _fld2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } _dataTable = new DataTable(_data1, _data2, LargestVectorSize); } public bool IsSupported => Sse41.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Sse41.TestZ( Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray2Ptr) ); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, result); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = Sse41.TestZ( Sse2.LoadVector128((UInt32*)(_dataTable.inArray1Ptr)), Sse2.LoadVector128((UInt32*)(_dataTable.inArray2Ptr)) ); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, result); } public void RunBasicScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_LoadAligned)); var result = Sse41.TestZ( Sse2.LoadAlignedVector128((UInt32*)(_dataTable.inArray1Ptr)), Sse2.LoadAlignedVector128((UInt32*)(_dataTable.inArray2Ptr)) ); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, result); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Sse41).GetMethod(nameof(Sse41.TestZ), new Type[] { typeof(Vector128<UInt32>), typeof(Vector128<UInt32>) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray2Ptr) }); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, (bool)(result)); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(Sse41).GetMethod(nameof(Sse41.TestZ), new Type[] { typeof(Vector128<UInt32>), typeof(Vector128<UInt32>) }) .Invoke(null, new object[] { Sse2.LoadVector128((UInt32*)(_dataTable.inArray1Ptr)), Sse2.LoadVector128((UInt32*)(_dataTable.inArray2Ptr)) }); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, (bool)(result)); } public void RunReflectionScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_LoadAligned)); var result = typeof(Sse41).GetMethod(nameof(Sse41.TestZ), new Type[] { typeof(Vector128<UInt32>), typeof(Vector128<UInt32>) }) .Invoke(null, new object[] { Sse2.LoadAlignedVector128((UInt32*)(_dataTable.inArray1Ptr)), Sse2.LoadAlignedVector128((UInt32*)(_dataTable.inArray2Ptr)) }); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, (bool)(result)); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Sse41.TestZ( _clsVar1, _clsVar2 ); ValidateResult(_clsVar1, _clsVar2, result); } public void RunClsVarScenario_Load() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<UInt32>* pClsVar1 = &_clsVar1) fixed (Vector128<UInt32>* pClsVar2 = &_clsVar2) { var result = Sse41.TestZ( Sse2.LoadVector128((UInt32*)(pClsVar1)), Sse2.LoadVector128((UInt32*)(pClsVar2)) ); ValidateResult(_clsVar1, _clsVar2, result); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray2Ptr); var result = Sse41.TestZ(op1, op2); ValidateResult(op1, op2, result); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = Sse2.LoadVector128((UInt32*)(_dataTable.inArray1Ptr)); var op2 = Sse2.LoadVector128((UInt32*)(_dataTable.inArray2Ptr)); var result = Sse41.TestZ(op1, op2); ValidateResult(op1, op2, result); } public void RunLclVarScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_LoadAligned)); var op1 = Sse2.LoadAlignedVector128((UInt32*)(_dataTable.inArray1Ptr)); var op2 = Sse2.LoadAlignedVector128((UInt32*)(_dataTable.inArray2Ptr)); var result = Sse41.TestZ(op1, op2); ValidateResult(op1, op2, result); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new BooleanBinaryOpTest__TestZUInt32(); var result = Sse41.TestZ(test._fld1, test._fld2); ValidateResult(test._fld1, test._fld2, result); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new BooleanBinaryOpTest__TestZUInt32(); fixed (Vector128<UInt32>* pFld1 = &test._fld1) fixed (Vector128<UInt32>* pFld2 = &test._fld2) { var result = Sse41.TestZ( Sse2.LoadVector128((UInt32*)(pFld1)), Sse2.LoadVector128((UInt32*)(pFld2)) ); ValidateResult(test._fld1, test._fld2, result); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Sse41.TestZ(_fld1, _fld2); ValidateResult(_fld1, _fld2, result); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<UInt32>* pFld1 = &_fld1) fixed (Vector128<UInt32>* pFld2 = &_fld2) { var result = Sse41.TestZ( Sse2.LoadVector128((UInt32*)(pFld1)), Sse2.LoadVector128((UInt32*)(pFld2)) ); ValidateResult(_fld1, _fld2, result); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Sse41.TestZ(test._fld1, test._fld2); ValidateResult(test._fld1, test._fld2, result); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = Sse41.TestZ( Sse2.LoadVector128((UInt32*)(&test._fld1)), Sse2.LoadVector128((UInt32*)(&test._fld2)) ); ValidateResult(test._fld1, test._fld2, result); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<UInt32> op1, Vector128<UInt32> op2, bool result, [CallerMemberName] string method = "") { UInt32[] inArray1 = new UInt32[Op1ElementCount]; UInt32[] inArray2 = new UInt32[Op2ElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray2[0]), op2); ValidateResult(inArray1, inArray2, result, method); } private void ValidateResult(void* op1, void* op2, bool result, [CallerMemberName] string method = "") { UInt32[] inArray1 = new UInt32[Op1ElementCount]; UInt32[] inArray2 = new UInt32[Op2ElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); ValidateResult(inArray1, inArray2, result, method); } private void ValidateResult(UInt32[] left, UInt32[] right, bool result, [CallerMemberName] string method = "") { bool succeeded = true; var expectedResult = true; for (var i = 0; i < Op1ElementCount; i++) { expectedResult &= ((left[i] & right[i]) == 0); } succeeded = (expectedResult == result); if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Sse41)}.{nameof(Sse41.TestZ)}<UInt32>(Vector128<UInt32>, Vector128<UInt32>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({result})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/tests/JIT/IL_Conformance/Old/Conformance_Base/ldc_conv_ovf_u4_i.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern legacy library mscorlib {} .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } // //====================================== //---- CLASS ---------------- .class public conv_ovf_i4 { //---- GLOBAL DATA ---------- //---- METHODS -------------- //Arg0 is what we want to convert, arg1 is what we expect it to equal //after conversion .method public static int32 conv_0(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start: ldarg 0 conv.ovf.i /* dup call void [System.Console]System.Console::WriteLine(int32) ldarg 1 call void [System.Console]System.Console::WriteLine(int32) */ stloc.1 leave.s try_end try_end: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_0(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start2: ldarg 0 conv.ovf.i.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 // dup // stloc 0 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_1(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start: ldarg 0 conv.ovf.i /* dup call void [System.Console]System.Console::WriteLine(int32) ldarg 1 call void [System.Console]System.Console::WriteLine(int32) */ stloc.1 leave.s try_end try_end: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_1(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start2: ldarg 0 conv.ovf.i.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 // dup // stloc 0 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_2(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start: ldarg 0 conv.ovf.i /* dup call void [System.Console]System.Console::WriteLine(int32) ldarg 1 call void [System.Console]System.Console::WriteLine(int32) */ stloc.1 leave.s try_end try_end: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException 
stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_2(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start2: ldarg 0 conv.ovf.i.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 // dup // stloc 0 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_3(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start: ldarg 0 conv.ovf.i /* dup call void [System.Console]System.Console::WriteLine(int32) ldarg 1 call void [System.Console]System.Console::WriteLine(int32) */ stloc.1 leave.s try_end try_end: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_3(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start2: ldarg 0 conv.ovf.i.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 // dup // stloc 0 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_4(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start: ldarg 0 conv.ovf.i /* dup call void [System.Console]System.Console::WriteLine(int32) ldarg 1 call void [System.Console]System.Console::WriteLine(int32) */ stloc.1 leave.s try_end try_end: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_4(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start2: ldarg 0 conv.ovf.i.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 // dup // stloc 0 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_5(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start: ldarg 0 conv.ovf.i /* dup call void 
[System.Console]System.Console::WriteLine(int32) ldarg 1 call void [System.Console]System.Console::WriteLine(int32) */ stloc.1 leave.s try_end try_end: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_5(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start2: ldarg 0 conv.ovf.i.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 // dup // stloc 0 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_6(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start: ldarg 0 conv.ovf.i /* dup call void [System.Console]System.Console::WriteLine(int32) ldarg 1 call void [System.Console]System.Console::WriteLine(int32) */ stloc.1 leave.s try_end try_end: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_6(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start2: ldarg 0 conv.ovf.i.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 // dup // stloc 0 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_7(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start: ldarg 0 conv.ovf.i /* dup call void [System.Console]System.Console::WriteLine(int32) ldarg 1 call void [System.Console]System.Console::WriteLine(int32) */ stloc.1 leave.s try_end try_end: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_7(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start2: ldarg 0 conv.ovf.i.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 // dup // stloc 0 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try 
try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_8(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start: ldarg 0 conv.ovf.i /* dup call void [System.Console]System.Console::WriteLine(int32) ldarg 1 call void [System.Console]System.Console::WriteLine(int32) */ stloc.1 leave.s try_end try_end: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_8(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start2: ldarg 0 conv.ovf.i.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 // dup // stloc 0 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_9(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start: ldarg 0 conv.ovf.i /* dup call void [System.Console]System.Console::WriteLine(int32) ldarg 1 call void [System.Console]System.Console::WriteLine(int32) */ stloc.1 leave.s try_end try_end: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_9(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start2: ldarg 0 conv.ovf.i.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 // dup // stloc 0 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_10(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start: ldarg 0 conv.ovf.i /* dup call void [System.Console]System.Console::WriteLine(int32) ldarg 1 call void [System.Console]System.Console::WriteLine(int32) */ stloc.1 leave.s try_end try_end: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_10(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start2: ldarg 0 conv.ovf.i.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 // dup 
// stloc 0 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_11(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start: ldarg 0 conv.ovf.i /* dup call void [System.Console]System.Console::WriteLine(int32) ldarg 1 call void [System.Console]System.Console::WriteLine(int32) */ stloc.1 leave.s try_end try_end: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_11(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start2: ldarg 0 conv.ovf.i.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 // dup // stloc 0 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_12(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start: ldarg 0 conv.ovf.i /* dup call void [System.Console]System.Console::WriteLine(int32) ldarg 1 call void [System.Console]System.Console::WriteLine(int32) */ stloc.1 leave.s try_end try_end: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_12(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start2: ldarg 0 conv.ovf.i.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 // dup // stloc 0 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_13(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start: ldarg 0 conv.ovf.i /* dup call void [System.Console]System.Console::WriteLine(int32) ldarg 1 call void [System.Console]System.Console::WriteLine(int32) */ stloc.1 leave.s try_end try_end: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch 
[mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_13(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start2: ldarg 0 conv.ovf.i.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 // dup // stloc 0 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } //---- CONSTRUCTOR ---------- .method public void conv_ovf_i4() { .maxstack 1 ret } //---- MAIN ----------------- .method public static int32 main(class [mscorlib]System.String[]) { .entrypoint .maxstack 5 //====== begin testing ====== //-- min i4 -- ldc.i4 0x80000000 ldc.i4 0x80000000 call int32 conv_ovf_i4::conv_0(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- -1 i4 -- ldc.i4 0xFFFFFFFF ldc.i4 0xFFFFFFFF // ldc.i4 0x00000000 call int32 conv_ovf_i4::conv_1(int32,int32) // ldc.i4 0xEEEEEEEE ldc.i4 0x11111111 ceq brfalse FAIL //-- 0 i4 -- ldc.i4 0x00000000 ldc.i4 0x00000000 call int32 conv_ovf_i4::conv_2(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- +1 i4 -- ldc.i4 0x00000001 ldc.i4 0x00000001 call int32 conv_ovf_i4::conv_3(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- max i4 -- ldc.i4 0x7FFFFFFF ldc.i4 0x7FFFFFFF call int32 conv_ovf_i4::conv_4(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- Odd u4 -- ldc.i4 0x55555555 ldc.i4 0x55555555 call int32 conv_ovf_i4::conv_5(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- Even u4 -- ldc.i4 0xAAAAAAAA // ldc.i4 0x00000000 ldc.i4 0xAAAAAAAA call int32 conv_ovf_i4::conv_6(int32,int32) // ldc.i4 0xEEEEEEEE ldc.i4 0x11111111 ceq brfalse FAIL //====== begin testing ====== //-- min i4 -- /* ldc.i4 0x80000000 ldc.i4 0x00000000 call int32 conv_ovf_i4::conv_un_7(int32,int32) ldc.i4 0xEEEEEEEE ceq ret brfalse FAIL //-- -1 i4 -- ldc.i4 0xFFFFFFFF ldc.i4 0x00000000 call int32 conv_ovf_i4::conv_un_8(int32,int32) ldc.i4 0xEEEEEEEE ceq brfalse FAIL *///-- 0 i4 -- ldc.i4 0x00000000 ldc.i4 0x00000000 call int32 conv_ovf_i4::conv_un_9(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- +1 i4 -- ldc.i4 0x00000001 ldc.i4 0x00000001 call int32 conv_ovf_i4::conv_un_10(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- max i4 -- ldc.i4 0x7FFFFFFF ldc.i4 0x7FFFFFFF call int32 conv_ovf_i4::conv_un_11(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- Odd u4 -- ldc.i4 0x55555555 ldc.i4 0x55555555 call int32 conv_ovf_i4::conv_un_12(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- Even u4 -- /* ldc.i4 0xAAAAAAAA ldc.i4 0x00000000 call int32 conv_ovf_i4::conv_un_13(int32,int32) ldc.i4 0xEEEEEEEE ceq brfalse FAIL */ //====== end testing ======== //---- branch here on pass -- PASS: ldc.i4 100 br END //---- branch here on fail -- FAIL: ldc.i4 101 //---- return the result ---- END: ret //---- END OF METHOD -------- } //---- EOF ------------------ } .assembly ldc_conv_ovf_u4_i{}
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern legacy library mscorlib {} .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } // //====================================== //---- CLASS ---------------- .class public conv_ovf_i4 { //---- GLOBAL DATA ---------- //---- METHODS -------------- //Arg0 is what we want to convert, arg1 is what we expect it to equal //after conversion .method public static int32 conv_0(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start: ldarg 0 conv.ovf.i /* dup call void [System.Console]System.Console::WriteLine(int32) ldarg 1 call void [System.Console]System.Console::WriteLine(int32) */ stloc.1 leave.s try_end try_end: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_0(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start2: ldarg 0 conv.ovf.i.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 // dup // stloc 0 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_1(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start: ldarg 0 conv.ovf.i /* dup call void [System.Console]System.Console::WriteLine(int32) ldarg 1 call void [System.Console]System.Console::WriteLine(int32) */ stloc.1 leave.s try_end try_end: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_1(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start2: ldarg 0 conv.ovf.i.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 // dup // stloc 0 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_2(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start: ldarg 0 conv.ovf.i /* dup call void [System.Console]System.Console::WriteLine(int32) ldarg 1 call void [System.Console]System.Console::WriteLine(int32) */ stloc.1 leave.s try_end try_end: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException 
stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_2(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start2: ldarg 0 conv.ovf.i.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 // dup // stloc 0 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_3(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start: ldarg 0 conv.ovf.i /* dup call void [System.Console]System.Console::WriteLine(int32) ldarg 1 call void [System.Console]System.Console::WriteLine(int32) */ stloc.1 leave.s try_end try_end: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_3(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start2: ldarg 0 conv.ovf.i.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 // dup // stloc 0 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_4(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start: ldarg 0 conv.ovf.i /* dup call void [System.Console]System.Console::WriteLine(int32) ldarg 1 call void [System.Console]System.Console::WriteLine(int32) */ stloc.1 leave.s try_end try_end: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_4(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start2: ldarg 0 conv.ovf.i.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 // dup // stloc 0 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_5(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start: ldarg 0 conv.ovf.i /* dup call void 
[System.Console]System.Console::WriteLine(int32) ldarg 1 call void [System.Console]System.Console::WriteLine(int32) */ stloc.1 leave.s try_end try_end: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_5(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start2: ldarg 0 conv.ovf.i.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 // dup // stloc 0 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_6(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start: ldarg 0 conv.ovf.i /* dup call void [System.Console]System.Console::WriteLine(int32) ldarg 1 call void [System.Console]System.Console::WriteLine(int32) */ stloc.1 leave.s try_end try_end: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_6(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start2: ldarg 0 conv.ovf.i.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 // dup // stloc 0 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_7(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start: ldarg 0 conv.ovf.i /* dup call void [System.Console]System.Console::WriteLine(int32) ldarg 1 call void [System.Console]System.Console::WriteLine(int32) */ stloc.1 leave.s try_end try_end: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_7(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start2: ldarg 0 conv.ovf.i.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 // dup // stloc 0 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try 
try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_8(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start: ldarg 0 conv.ovf.i /* dup call void [System.Console]System.Console::WriteLine(int32) ldarg 1 call void [System.Console]System.Console::WriteLine(int32) */ stloc.1 leave.s try_end try_end: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_8(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start2: ldarg 0 conv.ovf.i.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 // dup // stloc 0 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_9(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start: ldarg 0 conv.ovf.i /* dup call void [System.Console]System.Console::WriteLine(int32) ldarg 1 call void [System.Console]System.Console::WriteLine(int32) */ stloc.1 leave.s try_end try_end: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_9(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start2: ldarg 0 conv.ovf.i.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 // dup // stloc 0 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_10(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start: ldarg 0 conv.ovf.i /* dup call void [System.Console]System.Console::WriteLine(int32) ldarg 1 call void [System.Console]System.Console::WriteLine(int32) */ stloc.1 leave.s try_end try_end: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_10(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start2: ldarg 0 conv.ovf.i.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 // dup 
// stloc 0 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_11(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start: ldarg 0 conv.ovf.i /* dup call void [System.Console]System.Console::WriteLine(int32) ldarg 1 call void [System.Console]System.Console::WriteLine(int32) */ stloc.1 leave.s try_end try_end: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_11(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start2: ldarg 0 conv.ovf.i.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 // dup // stloc 0 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_12(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start: ldarg 0 conv.ovf.i /* dup call void [System.Console]System.Console::WriteLine(int32) ldarg 1 call void [System.Console]System.Console::WriteLine(int32) */ stloc.1 leave.s try_end try_end: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_12(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start2: ldarg 0 conv.ovf.i.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 // dup // stloc 0 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_13(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start: ldarg 0 conv.ovf.i /* dup call void [System.Console]System.Console::WriteLine(int32) ldarg 1 call void [System.Console]System.Console::WriteLine(int32) */ stloc.1 leave.s try_end try_end: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch 
[mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_13(int32,int32) { .locals (class [mscorlib]System.OverflowException,native int) .maxstack 2 try_start2: ldarg 0 conv.ovf.i.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 // dup // stloc 0 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } //---- CONSTRUCTOR ---------- .method public void conv_ovf_i4() { .maxstack 1 ret } //---- MAIN ----------------- .method public static int32 main(class [mscorlib]System.String[]) { .entrypoint .maxstack 5 //====== begin testing ====== //-- min i4 -- ldc.i4 0x80000000 ldc.i4 0x80000000 call int32 conv_ovf_i4::conv_0(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- -1 i4 -- ldc.i4 0xFFFFFFFF ldc.i4 0xFFFFFFFF // ldc.i4 0x00000000 call int32 conv_ovf_i4::conv_1(int32,int32) // ldc.i4 0xEEEEEEEE ldc.i4 0x11111111 ceq brfalse FAIL //-- 0 i4 -- ldc.i4 0x00000000 ldc.i4 0x00000000 call int32 conv_ovf_i4::conv_2(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- +1 i4 -- ldc.i4 0x00000001 ldc.i4 0x00000001 call int32 conv_ovf_i4::conv_3(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- max i4 -- ldc.i4 0x7FFFFFFF ldc.i4 0x7FFFFFFF call int32 conv_ovf_i4::conv_4(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- Odd u4 -- ldc.i4 0x55555555 ldc.i4 0x55555555 call int32 conv_ovf_i4::conv_5(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- Even u4 -- ldc.i4 0xAAAAAAAA // ldc.i4 0x00000000 ldc.i4 0xAAAAAAAA call int32 conv_ovf_i4::conv_6(int32,int32) // ldc.i4 0xEEEEEEEE ldc.i4 0x11111111 ceq brfalse FAIL //====== begin testing ====== //-- min i4 -- /* ldc.i4 0x80000000 ldc.i4 0x00000000 call int32 conv_ovf_i4::conv_un_7(int32,int32) ldc.i4 0xEEEEEEEE ceq ret brfalse FAIL //-- -1 i4 -- ldc.i4 0xFFFFFFFF ldc.i4 0x00000000 call int32 conv_ovf_i4::conv_un_8(int32,int32) ldc.i4 0xEEEEEEEE ceq brfalse FAIL *///-- 0 i4 -- ldc.i4 0x00000000 ldc.i4 0x00000000 call int32 conv_ovf_i4::conv_un_9(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- +1 i4 -- ldc.i4 0x00000001 ldc.i4 0x00000001 call int32 conv_ovf_i4::conv_un_10(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- max i4 -- ldc.i4 0x7FFFFFFF ldc.i4 0x7FFFFFFF call int32 conv_ovf_i4::conv_un_11(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- Odd u4 -- ldc.i4 0x55555555 ldc.i4 0x55555555 call int32 conv_ovf_i4::conv_un_12(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- Even u4 -- /* ldc.i4 0xAAAAAAAA ldc.i4 0x00000000 call int32 conv_ovf_i4::conv_un_13(int32,int32) ldc.i4 0xEEEEEEEE ceq brfalse FAIL */ //====== end testing ======== //---- branch here on pass -- PASS: ldc.i4 100 br END //---- branch here on fail -- FAIL: ldc.i4 101 //---- return the result ---- END: ret //---- END OF METHOD -------- } //---- EOF ------------------ } .assembly ldc_conv_ovf_u4_i{}
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/tests/JIT/Regression/CLR-x86-JIT/V1.2-M01/b11762/b11762.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest175/Generated175.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="Generated175.il" /> </ItemGroup> <ItemGroup> <ProjectReference Include="..\TestFramework\TestFramework.csproj" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="Generated175.il" /> </ItemGroup> <ItemGroup> <ProjectReference Include="..\TestFramework\TestFramework.csproj" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/tests/JIT/Regression/VS-ia64-JIT/M00/b108908/b108908.il
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

.assembly extern legacy library mscorlib {}
.assembly extern System.Console
{
  .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A )
  .ver 4:0:0:0
}
.assembly 'b108908' {}
.assembly extern xunit.core {}

.class ILGEN_0x45f9b5e5
{
  .method static int32 Main()
  {
    .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 )
    .entrypoint
    .maxstack 9

    ldc.i4 1
    ldc.i4 0x2
    mul.ovf.un
    ldc.i4 2
    div
    ldc.i4 99
    add
    ret
  }
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

.assembly extern legacy library mscorlib {}
.assembly extern System.Console
{
  .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A )
  .ver 4:0:0:0
}
.assembly 'b108908' {}
.assembly extern xunit.core {}

.class ILGEN_0x45f9b5e5
{
  .method static int32 Main()
  {
    .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 )
    .entrypoint
    .maxstack 9

    ldc.i4 1
    ldc.i4 0x2
    mul.ovf.un
    ldc.i4 2
    div
    ldc.i4 99
    add
    ret
  }
}
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/tests/JIT/Methodical/fp/exgen/5w1d-04.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System;

internal unsafe class testout1
{
    public struct VT_0
    {
        public double a1_0;
        public VT_0(int i) { a1_0 = 1; }
    }

    public class CL_0
    {
        public double a0_0 = -1013.76;
    }

    public static double Func_0()
    {
        VT_0 vt_0 = new VT_0(1);
        vt_0.a1_0 = 10.24;
        CL_0 cl_0 = new CL_0();

        double asgop0 = vt_0.a1_0;
        asgop0 -= ((cl_0.a0_0));
        double asgop1 = cl_0.a0_0;
        asgop1 /= (-99.0);
        return Convert.ToDouble((asgop0 / asgop1));
    }

    public static int Main()
    {
        int retval;
        retval = Convert.ToInt32(Func_0());
        if ((retval >= 99) && (retval < 100)) retval = 100;
        if ((retval > 100) && (retval <= 101)) retval = 100;
        Console.WriteLine(retval);
        return retval;
    }
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System;

internal unsafe class testout1
{
    public struct VT_0
    {
        public double a1_0;
        public VT_0(int i) { a1_0 = 1; }
    }

    public class CL_0
    {
        public double a0_0 = -1013.76;
    }

    public static double Func_0()
    {
        VT_0 vt_0 = new VT_0(1);
        vt_0.a1_0 = 10.24;
        CL_0 cl_0 = new CL_0();

        double asgop0 = vt_0.a1_0;
        asgop0 -= ((cl_0.a0_0));
        double asgop1 = cl_0.a0_0;
        asgop1 /= (-99.0);
        return Convert.ToDouble((asgop0 / asgop1));
    }

    public static int Main()
    {
        int retval;
        retval = Convert.ToInt32(Func_0());
        if ((retval >= 99) && (retval < 100)) retval = 100;
        if ((retval > 100) && (retval <= 101)) retval = 100;
        Console.WriteLine(retval);
        return retval;
    }
}
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/tests/JIT/Methodical/refany/u_native_il_d.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL">
  <PropertyGroup>
    <OutputType>Exe</OutputType>
    <CLRTestPriority>1</CLRTestPriority>
  </PropertyGroup>
  <PropertyGroup>
    <DebugType>Full</DebugType>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="u_native.il" />
  </ItemGroup>
</Project>
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/tests/assembly-load-reference/separatedir/LoadFileMain.cs
using System;
using System.IO;
using System.Reflection;

public class Test
{
    public static int Main ()
    {
        var p = Path.Combine (AppDomain.CurrentDomain.BaseDirectory, "middle", "Mid.dll");
        var a = Assembly.LoadFile (p);
        var t = a.GetType ("MyType");
        bool caught = false;
        try {
            Activator.CreateInstance (t);
        } catch (TargetInvocationException tie) {
            if (tie.InnerException is FileNotFoundException) {
                /* reference assembly loading throws FNFE */
                caught = true;
            } else {
                Console.Error.WriteLine ($"Expected tie.InnerException to be FileNotFoundException, but got {tie.InnerException}");
                return 1;
            }
        } catch (Exception e) {
            Console.Error.WriteLine ($"Expected TargetInvocationException, but got {e}");
            return 2;
        }
        if (!caught) {
            Console.Error.WriteLine ($"Expected an exception, but got none");
            return 3;
        }
        return 0;
    }
}
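A general .NET point this test relies on (background knowledge, not taken from the source): Activator.CreateInstance surfaces failures raised while constructing the instance as a TargetInvocationException, so callers that care about the root cause inspect InnerException. A minimal illustration:

using System;
using System.Reflection;

class UnwrapExample
{
    class Throws
    {
        public Throws() => throw new InvalidOperationException("ctor failed");
    }

    static void Main()
    {
        try
        {
            Activator.CreateInstance(typeof(Throws));
        }
        catch (TargetInvocationException tie)
        {
            // The original failure is available on InnerException.
            Console.WriteLine(tie.InnerException?.GetType().Name); // InvalidOperationException
        }
    }
}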
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/libraries/System.Speech/src/Internal/SrgsCompiler/CFGGrammar.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.IO; using System.Runtime.InteropServices; using System.Speech.Internal.SrgsParser; namespace System.Speech.Internal.SrgsCompiler { internal sealed class CfgGrammar { #region Constructors internal CfgGrammar() { } #endregion #region Internal Types // Preprocess CFG header file internal struct CfgHeader { internal Guid FormatId; internal Guid GrammarGUID; internal ushort langId; internal ushort pszGlobalTags; internal int cArcsInLargestState; internal StringBlob pszWords; internal StringBlob pszSymbols; internal CfgRule[] rules; internal CfgArc[] arcs; internal float[] weights; internal CfgSemanticTag[] tags; internal CfgScriptRef[] scripts; internal uint ulRootRuleIndex; internal GrammarOptions GrammarOptions; internal GrammarType GrammarMode; internal string BasePath; } [StructLayout(LayoutKind.Sequential)] internal class CfgSerializedHeader { internal CfgSerializedHeader() { } #pragma warning disable 56518 // The Binary reader cannot be disposed or it would close the underlying stream // Initializes a CfgSerializedHeader from a Stream. // If the data does not represent a cfg then UnsuportedFormatException is thrown. // This isn't a conclusive validity check, but is enough to determine if it's a CFG or not. // For a complete check CheckValidCfgFormat is used. internal CfgSerializedHeader(Stream stream) { BinaryReader br = new(stream); ulTotalSerializedSize = br.ReadUInt32(); if (ulTotalSerializedSize < SP_SPCFGSERIALIZEDHEADER_500 || ulTotalSerializedSize > int.MaxValue) { // Size is either negative or too small. XmlParser.ThrowSrgsException(SRID.UnsupportedFormat); } FormatId = new Guid(br.ReadBytes(16)); if (FormatId != CfgGrammar._SPGDF_ContextFree) { // Not of cfg format XmlParser.ThrowSrgsException(SRID.UnsupportedFormat); } GrammarGUID = new Guid(br.ReadBytes(16)); LangID = br.ReadUInt16(); pszSemanticInterpretationGlobals = br.ReadUInt16(); cArcsInLargestState = br.ReadInt32(); cchWords = br.ReadInt32(); cWords = br.ReadInt32(); pszWords = br.ReadUInt32(); if (pszWords < SP_SPCFGSERIALIZEDHEADER_500 || pszWords > ulTotalSerializedSize) { // First data points before or before valid range. XmlParser.ThrowSrgsException(SRID.UnsupportedFormat); } cchSymbols = br.ReadInt32(); pszSymbols = br.ReadUInt32(); cRules = br.ReadInt32(); pRules = br.ReadUInt32(); cArcs = br.ReadInt32(); pArcs = br.ReadUInt32(); pWeights = br.ReadUInt32(); cTags = br.ReadInt32(); tags = br.ReadUInt32(); ulReservered1 = br.ReadUInt32(); ulReservered2 = br.ReadUInt32(); if (pszWords > SP_SPCFGSERIALIZEDHEADER_500) { cScripts = br.ReadInt32(); pScripts = br.ReadUInt32(); cIL = br.ReadInt32(); pIL = br.ReadUInt32(); cPDB = br.ReadInt32(); pPDB = br.ReadUInt32(); ulRootRuleIndex = br.ReadUInt32(); GrammarOptions = (GrammarOptions)br.ReadUInt32(); cBasePath = br.ReadUInt32(); GrammarMode = br.ReadUInt32(); ulReservered3 = br.ReadUInt32(); ulReservered4 = br.ReadUInt32(); } // Else SAPI 5.0 syntax grammar - parameters set to zero } internal static bool IsCfg(Stream stream, out int cfgLength) { cfgLength = 0; BinaryReader br = new(stream); uint ulTotalSerializedSize = br.ReadUInt32(); if (ulTotalSerializedSize < SP_SPCFGSERIALIZEDHEADER_500 || ulTotalSerializedSize > int.MaxValue) { // Size is either negative or too small. 
return false; } Guid formatId = new(br.ReadBytes(16)); if (formatId != CfgGrammar._SPGDF_ContextFree) { // Not of cfg format return false; } cfgLength = (int)ulTotalSerializedSize; return true; } #pragma warning restore 56518 // The Binary reader cannot be disposed or it would close the underlying stream internal uint ulTotalSerializedSize; internal Guid FormatId; internal Guid GrammarGUID; internal ushort LangID; internal ushort pszSemanticInterpretationGlobals; internal int cArcsInLargestState; internal int cchWords; internal int cWords; internal uint pszWords; internal int cchSymbols; internal uint pszSymbols; internal int cRules; internal uint pRules; internal int cArcs; internal uint pArcs; internal uint pWeights; internal int cTags; internal uint tags; internal uint ulReservered1; internal uint ulReservered2; internal int cScripts; internal uint pScripts; internal int cIL; internal uint pIL; internal int cPDB; internal uint pPDB; internal uint ulRootRuleIndex; internal GrammarOptions GrammarOptions; internal uint cBasePath; internal uint GrammarMode; internal uint ulReservered3; internal uint ulReservered4; } internal class CfgProperty { internal string _pszName; internal uint _ulId; #pragma warning disable 0618 // VarEnum is obsolete internal VarEnum _comType; #pragma warning restore 0618 internal object _comValue; } #endregion #region Internal Methods // // This helper converts a serialized CFG grammar header into an in-memory header // internal static CfgHeader ConvertCfgHeader(StreamMarshaler streamHelper) { CfgSerializedHeader cfgSerializedHeader = null; return ConvertCfgHeader(streamHelper, true, true, out cfgSerializedHeader); } internal static CfgHeader ConvertCfgHeader(StreamMarshaler streamHelper, bool includeAllGrammarData, bool loadSymbols, out CfgSerializedHeader cfgSerializedHeader) { cfgSerializedHeader = new CfgSerializedHeader(streamHelper.Stream); // // Because in 64-bit code, pointers != sizeof(ULONG) we copy each member explicitly. // CfgHeader header = new(); header.FormatId = cfgSerializedHeader.FormatId; header.GrammarGUID = cfgSerializedHeader.GrammarGUID; header.langId = cfgSerializedHeader.LangID; header.pszGlobalTags = cfgSerializedHeader.pszSemanticInterpretationGlobals; header.cArcsInLargestState = cfgSerializedHeader.cArcsInLargestState; // read all the common fields header.rules = Load<CfgRule>(streamHelper, cfgSerializedHeader.pRules, cfgSerializedHeader.cRules); if (includeAllGrammarData || loadSymbols) { header.pszSymbols = LoadStringBlob(streamHelper, cfgSerializedHeader.pszSymbols, cfgSerializedHeader.cchSymbols); } if (includeAllGrammarData) { header.pszWords = LoadStringBlob(streamHelper, cfgSerializedHeader.pszWords, cfgSerializedHeader.cchWords); header.arcs = Load<CfgArc>(streamHelper, cfgSerializedHeader.pArcs, cfgSerializedHeader.cArcs); header.tags = Load<CfgSemanticTag>(streamHelper, cfgSerializedHeader.tags, cfgSerializedHeader.cTags); header.weights = Load<float>(streamHelper, cfgSerializedHeader.pWeights, cfgSerializedHeader.cArcs); } //We know that in SAPI 5.0 grammar format pszWords follows header immediately. 
if (cfgSerializedHeader.pszWords < Marshal.SizeOf<CfgSerializedHeader>()) { //This is SAPI 5.0 and SAPI 5.1 grammar format header.ulRootRuleIndex = 0xFFFFFFFF; header.GrammarOptions = GrammarOptions.KeyValuePairs; header.BasePath = null; header.GrammarMode = GrammarType.VoiceGrammar; } else { //This is SAPI 5.2 and beyond grammar format header.ulRootRuleIndex = cfgSerializedHeader.ulRootRuleIndex; header.GrammarOptions = cfgSerializedHeader.GrammarOptions; header.GrammarMode = (GrammarType)cfgSerializedHeader.GrammarMode; if (includeAllGrammarData) { header.scripts = Load<CfgScriptRef>(streamHelper, cfgSerializedHeader.pScripts, cfgSerializedHeader.cScripts); } // The BasePath string is written after the rules - no offset is provided // Get the chars and build the string if (cfgSerializedHeader.cBasePath > 0) { streamHelper.Stream.Position = (int)cfgSerializedHeader.pRules + (header.rules.Length * Marshal.SizeOf<CfgRule>()); header.BasePath = streamHelper.ReadNullTerminatedString(); } } // Check the content - should be valid for both SAPI 5.0 and SAPI 5.2 grammars CheckValidCfgFormat(cfgSerializedHeader, header, includeAllGrammarData); return header; } // // This helper converts a serialized CFG grammar header into an in-memory header // internal static ScriptRef[] LoadScriptRefs(StreamMarshaler streamHelper, CfgSerializedHeader pFH) { // // Because in 64-bit code, pointers != sizeof(ULONG) we copy each member explicitly. // if (pFH.FormatId != CfgGrammar._SPGDF_ContextFree) { return null; } //We know that in SAPI 5.0 grammar format pszWords follows header immediately. if (pFH.pszWords < Marshal.SizeOf<CfgSerializedHeader>()) { // Must be SAPI 6.0 or above to hold a .NET script return null; } // Get the symbols StringBlob symbols = LoadStringBlob(streamHelper, pFH.pszSymbols, pFH.cchSymbols); // Get the script refs CfgScriptRef[] cfgScripts = Load<CfgScriptRef>(streamHelper, pFH.pScripts, pFH.cScripts); // Convert the CFG script reference to ScriptRef ScriptRef[] scripts = new ScriptRef[cfgScripts.Length]; for (int i = 0; i < cfgScripts.Length; i++) { CfgScriptRef cfgScript = cfgScripts[i]; scripts[i] = new ScriptRef(symbols[cfgScript._idRule], symbols[cfgScript._idMethod], cfgScript._method); } return scripts; } internal static ScriptRef[] LoadIL(Stream stream) { using (StreamMarshaler streamHelper = new(stream)) { CfgSerializedHeader pFH = new(); streamHelper.ReadStream(pFH); return LoadScriptRefs(streamHelper, pFH); } } internal static bool LoadIL(Stream stream, out byte[] assemblyContent, out byte[] assemblyDebugSymbols, out ScriptRef[] scripts) { assemblyContent = assemblyDebugSymbols = null; scripts = null; using (StreamMarshaler streamHelper = new(stream)) { CfgSerializedHeader pFH = new(); streamHelper.ReadStream(pFH); scripts = LoadScriptRefs(streamHelper, pFH); if (scripts == null) { return false; } // Return if no script if (pFH.cIL == 0) { return false; } // Get the assembly content assemblyContent = Load<byte>(streamHelper, pFH.pIL, pFH.cIL); assemblyDebugSymbols = pFH.cPDB > 0 ? Load<byte>(streamHelper, pFH.pPDB, pFH.cPDB) : null; } return true; } #endregion #region Private Methods private static void CheckValidCfgFormat(CfgSerializedHeader pFH, CfgHeader header, bool includeAllGrammarData) { //See backend commit method to understand the layout of cfg format if (pFH.pszWords < SP_SPCFGSERIALIZEDHEADER_500) { XmlParser.ThrowSrgsException(SRID.UnsupportedFormat); } int ullStartOffset = (int)pFH.pszWords; //Check the word offset //See stringblob implementation. 
pFH.cchWords * sizeof(WCHAR) isn't exactly the serialized size, but it is close and must be less than the serialized size CheckSetOffsets(pFH.pszWords, pFH.cchWords * Helpers._sizeOfChar, ref ullStartOffset, pFH.ulTotalSerializedSize); //Check the symbol offset //symbol is right after word //pFH.pszSymbols is very close to pFH.pszWords + pFH.cchWords * sizeof(WCHAR) CheckSetOffsets(pFH.pszSymbols, pFH.cchSymbols * Helpers._sizeOfChar, ref ullStartOffset, pFH.ulTotalSerializedSize); //Check the rule offset if (pFH.cRules > 0) { CheckSetOffsets(pFH.pRules, pFH.cRules * Marshal.SizeOf<CfgRule>(), ref ullStartOffset, pFH.ulTotalSerializedSize); } //Check the arc offset if (pFH.cArcs > 0) { CheckSetOffsets(pFH.pArcs, pFH.cArcs * Marshal.SizeOf<CfgArc>(), ref ullStartOffset, pFH.ulTotalSerializedSize); } //Check the weight offset if (pFH.pWeights > 0) { CheckSetOffsets(pFH.pWeights, pFH.cArcs * sizeof(float), ref ullStartOffset, pFH.ulTotalSerializedSize); } //Check the semantic tag offset if (pFH.cTags > 0) { CheckSetOffsets(pFH.tags, pFH.cTags * Marshal.SizeOf<CfgSemanticTag>(), ref ullStartOffset, pFH.ulTotalSerializedSize); if (includeAllGrammarData) { //Validate the SPCFGSEMANTICTAG array pointed to by tags //We use header for easy array access //The first arc is dummy, so the start and end arcindex for semantic tag won't be zero for (int i = 0; i < header.tags.Length; i++) { int startArc = (int)header.tags[i].StartArcIndex; int endArc = (int)header.tags[i].EndArcIndex; int cArcs = header.arcs.Length; #pragma warning disable 0618 // VarEnum is obsolete if (startArc == 0 || startArc >= cArcs || endArc == 0 || endArc >= cArcs || ( header.tags[i].PropVariantType != VarEnum.VT_EMPTY && header.tags[i].PropVariantType != VarEnum.VT_BSTR && header.tags[i].PropVariantType != VarEnum.VT_BOOL && header.tags[i].PropVariantType != VarEnum.VT_R8 && header.tags[i].PropVariantType != VarEnum.VT_I4) ) { XmlParser.ThrowSrgsException(SRID.UnsupportedFormat); } #pragma warning restore 0618 } } } //Check the offset for the scripts if (pFH.cScripts > 0) { CheckSetOffsets(pFH.pScripts, pFH.cScripts * Marshal.SizeOf<CfgScriptRef>(), ref ullStartOffset, pFH.ulTotalSerializedSize); } if (pFH.cIL > 0) { CheckSetOffsets(pFH.pIL, pFH.cIL * sizeof(byte), ref ullStartOffset, pFH.ulTotalSerializedSize); } if (pFH.cPDB > 0) { CheckSetOffsets(pFH.pPDB, pFH.cPDB * sizeof(byte), ref ullStartOffset, pFH.ulTotalSerializedSize); } } private static void CheckSetOffsets(uint offset, int size, ref int start, uint max) { if (offset < (uint)start || (start = (int)offset + size) > (int)max) { XmlParser.ThrowSrgsException(SRID.UnsupportedFormat); } } private static StringBlob LoadStringBlob(StreamMarshaler streamHelper, uint iPos, int c) { char[] ach = new char[c]; streamHelper.Position = iPos; streamHelper.ReadArrayChar(ach, c); return new StringBlob(ach); } private static T[] Load<T>(StreamMarshaler streamHelper, uint iPos, int c) { T[] t = null; t = new T[c]; if (c > 0) { streamHelper.Position = iPos; streamHelper.ReadArray<T>(t, c); } return t; } #endregion #region Internal Properties internal static uint NextHandle { get { return ++s_lastHandle; } } #endregion #region Internal Fields internal static Guid _SPGDF_ContextFree = new(0x4ddc926d, 0x6ce7, 0x4dc0, 0x99, 0xa7, 0xaf, 0x9e, 0x6b, 0x6a, 0x4e, 0x91); // internal const int INFINITE = unchecked((int)0xffffffff); // INFINITE // internal static readonly Rule SPRULETRANS_TEXTBUFFER = new(-1); internal static readonly Rule SPRULETRANS_WILDCARD = new(-2); internal static readonly 
Rule SPRULETRANS_DICTATION = new(-3); // internal const int SPTEXTBUFFERTRANSITION = 0x3fffff; internal const int SPWILDCARDTRANSITION = 0x3ffffe; internal const int SPDICTATIONTRANSITION = 0x3ffffd; internal const int MAX_TRANSITIONS_COUNT = 256; internal const float DEFAULT_WEIGHT = 1f; // internal const int SP_LOW_CONFIDENCE = -1; internal const int SP_NORMAL_CONFIDENCE = 0; internal const int SP_HIGH_CONFIDENCE = +1; #endregion #region Private Fields private const int SP_SPCFGSERIALIZEDHEADER_500 = 100; private static uint s_lastHandle; #endregion } }
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/tests/JIT/Regression/JitBlue/GitHub_23739/GitHub_23739.csproj
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <OutputType>Exe</OutputType>
    <DebugType />
    <Optimize>True</Optimize>
    <AllowUnsafeBlocks>True</AllowUnsafeBlocks>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="$(MSBuildProjectName).cs" />
  </ItemGroup>
</Project>
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/libraries/System.Private.Uri/tests/FunctionalTests/UriIsWellFormedUriStringTest.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using Xunit; namespace System.PrivateUri.Tests { /// <summary> /// Summary description for UriIsWellFormedUriStringTest /// </summary> public class UriIsWellFormedUriStringTest { [Fact] public void UriIsWellFormed_AbsoluteWellFormed_Success() { Assert.True(Uri.IsWellFormedUriString("http://foo.com/bad:url", UriKind.Absolute)); } [Fact] public void UriIsWellFormed_RelativeWellFormed_Success() { Assert.True(Uri.IsWellFormedUriString("/path/file?Query", UriKind.Relative)); } [Fact] public void UriIsWellFormed_RelativeWithColon_Failure() { Assert.False(Uri.IsWellFormedUriString("http://foo", UriKind.Relative)); } [Fact] public void UriIsWellFormed_RelativeWithPercentAndColon_Failure() { Assert.False(Uri.IsWellFormedUriString("bad%20http://foo", UriKind.Relative)); } [Fact] public void UriIsWellFormed_NewRelativeRegisteredAbsolute_Throws() { Assert.ThrowsAny<FormatException>(() => new Uri("http://foo", UriKind.Relative)); } [Fact] public void UriIsWellFormed_NewAbsoluteUnregisteredAsRelative_Throws() { Assert.ThrowsAny<FormatException>(() => new Uri("any://foo", UriKind.Relative)); } [Fact] public void UriIsWellFormed_NewRelativeWithKnownSchemeAndQuery_SuccessButNotWellFormed() { Uri test = new Uri("http:?foo", UriKind.Relative); Assert.False(Uri.IsWellFormedUriString(test.ToString(), UriKind.Relative), "Not well formed"); Assert.False(Uri.IsWellFormedUriString(test.ToString(), UriKind.Absolute), "Should not be well formed"); Assert.True(Uri.TryCreate(test.ToString(), UriKind.Relative, out test), "TryCreate Mismatch"); Uri result = new Uri(new Uri("http://host.com"), test); Assert.True(Uri.IsWellFormedUriString(result.ToString(), UriKind.Absolute), "Not well formed"); } [Fact] public void UriIsWellFormed_NewRelativeWithUnknownSchemeAndQuery_Throws() { Assert.False(Uri.TryCreate("any:?foo", UriKind.Relative, out _), "TryCreate should have Failed"); // The generic parser allows this kind of absolute Uri, where the http parser does not Assert.ThrowsAny<FormatException>(() => new Uri("any:?foo", UriKind.Relative)); } [Fact] public void UriIsWellFormed_TryCreateNewRelativeWithColon_Failure() { Assert.False(Uri.TryCreate("http://foo", UriKind.Relative, out _)); } // App-compat - A colon in the first segment of a relative Uri is invalid, but we cannot reject it. 
[Fact] public void UriIsWellFormed_TryCreateNewRelativeWithPercentAndColon_Success() { string input = "bad%20http://foo"; Assert.True(Uri.TryCreate(input, UriKind.Relative, out Uri test)); Assert.False(test.IsWellFormedOriginalString()); Assert.False(Uri.IsWellFormedUriString(input, UriKind.Relative)); Assert.False(Uri.IsWellFormedUriString(input, UriKind.RelativeOrAbsolute)); Assert.False(Uri.IsWellFormedUriString(input, UriKind.Absolute)); } [Fact] public void UriIsWellFormed_AbsoluteWithColonToRelative_AppendsDotSlash() { Uri baseUri = new Uri("https://base.com/path/stuff"); Uri test = new Uri("https://base.com/path/hi:there/", UriKind.Absolute); Uri rel = baseUri.MakeRelativeUri(test); Assert.True(Uri.IsWellFormedUriString(rel.ToString(), UriKind.Relative), "Not well formed: " + rel); Uri result = new Uri(baseUri, rel); Assert.Equal(test, result); //"Transitivity failure" Assert.True(string.CompareOrdinal(rel.ToString(), 0, "./", 0, 2) == 0, "Cannot have colon in first segment, must append ./"); } [Fact] public void UriIsWellFormed_AbsoluteWithPercentAndColonToRelative_AppendsDotSlash() { Uri baseUri = new Uri("https://base.com/path/stuff"); Uri test = new Uri("https://base.com/path/h%20i:there/", UriKind.Absolute); Uri rel = baseUri.MakeRelativeUri(test); Assert.True(Uri.IsWellFormedUriString(rel.ToString(), UriKind.Relative), "Not well formed: " + rel); Uri result = new Uri(baseUri, rel); Assert.Equal(test, result); //"Transitivity failure" Assert.True(string.CompareOrdinal(rel.ToString(), 0, "./", 0, 2) == 0, "Cannot have colon in first segment, must append ./"); } [Fact] public void UriMakeRelative_ImplicitFileCommonBaseWithColon_AppendsDotSlash() { Uri baseUri = new Uri(@"c:/base/path/stuff"); Uri test = new Uri(@"c:/base/path/hi:there/", UriKind.Absolute); Uri rel = baseUri.MakeRelativeUri(test); Assert.True(Uri.IsWellFormedUriString(rel.ToString(), UriKind.Relative), "Not well formed: " + rel); Uri result = new Uri(baseUri, rel); Assert.Equal(test.LocalPath, result.LocalPath); // "Transitivity failure" Assert.True(string.CompareOrdinal(rel.ToString(), 0, "./", 0, 2) == 0, "Cannot have colon in first segment, must append ./"); } [Fact] public void UriMakeRelative_ImplicitFileDifferentBaseWithColon_ReturnsSecondUri() { Uri baseUri = new Uri(@"c:/base/path/stuff"); Uri test = new Uri(@"d:/base/path/hi:there/", UriKind.Absolute); Uri rel = baseUri.MakeRelativeUri(test); Uri result = new Uri(baseUri, rel); Assert.Equal(test.LocalPath, result.LocalPath); //"Transitivity failure" } [Fact] public void UriMakeRelative_ExplicitFileDifferentBaseWithColon_ReturnsSecondUri() { Uri baseUri = new Uri(@"file://c:/stuff"); Uri test = new Uri(@"file://d:/hi:there/", UriKind.Absolute); Uri rel = baseUri.MakeRelativeUri(test); Assert.False(rel.IsAbsoluteUri, "Result should be relative"); Assert.Equal("d:/hi:there/", rel.ToString()); Uri result = new Uri(baseUri, rel); Assert.Equal(test.LocalPath, result.LocalPath); // "Transitivity failure" Assert.Equal(test.ToString(), result.ToString()); // "Transitivity failure" } [Fact] public void UriMakeRelative_ExplicitUncFileVsDosFile_ReturnsSecondPath() { Uri baseUri = new Uri(@"file:///u:/stuff"); Uri test = new Uri(@"file:///unc/hi:there/", UriKind.Absolute); Uri rel = baseUri.MakeRelativeUri(test); Uri result = new Uri(baseUri, rel); // This is a known oddity when mix and matching Unc & dos paths in this order. // The other way works as expected. 
Assert.Equal("file:///u:/unc/hi:there/", result.ToString()); } [Fact] public void UriMakeRelative_ExplicitDosFileWithHost_ReturnsSecondPath() { Uri baseUri = new Uri(@"file://host/u:/stuff"); Uri test = new Uri(@"file://host/unc/hi:there/", UriKind.Absolute); Uri rel = baseUri.MakeRelativeUri(test); Uri result = new Uri(baseUri, rel); Assert.Equal(test.LocalPath, result.LocalPath); // "Transitivity failure" } [Fact] public void UriMakeRelative_ExplicitDosFileSecondWithHost_ReturnsSecondPath() { Uri baseUri = new Uri(@"file://host/unc/stuff"); Uri test = new Uri(@"file://host/u:/hi:there/", UriKind.Absolute); Uri rel = baseUri.MakeRelativeUri(test); Uri result = new Uri(baseUri, rel); Assert.Equal(test.LocalPath, result.LocalPath); //"Transitivity failure" } [Fact] public void UriMakeRelative_ExplicitDosFileVsUncFile_ReturnsSecondUri() { Uri baseUri = new Uri(@"file:///unc/stuff"); Uri test = new Uri(@"file:///u:/hi:there/", UriKind.Absolute); Uri rel = baseUri.MakeRelativeUri(test); Uri result = new Uri(baseUri, rel); Assert.Equal(test.LocalPath, result.LocalPath); //"Transitivity failure" } [Fact] public void UriMakeRelative_ExplicitDosFileContainingImplicitDosPath_AddsDotSlash() { Uri baseUri = new Uri(@"file:///u:/stuff/file"); Uri test = new Uri(@"file:///u:/stuff/h:there/", UriKind.Absolute); Uri rel = baseUri.MakeRelativeUri(test); Uri result = new Uri(baseUri, rel); Assert.Equal(test.LocalPath, result.LocalPath); //"Transitivity failure" } [Fact] public void UriMakeRelative_DifferentSchemes_ReturnsSecondUri() { Uri baseUri = new Uri(@"http://base/path/stuff"); Uri test = new Uri(@"https://base/path/", UriKind.Absolute); Uri rel = baseUri.MakeRelativeUri(test); Assert.Equal(test, rel); Assert.True(Uri.IsWellFormedUriString(rel.ToString(), UriKind.Absolute), "Not well formed: " + rel); Uri result = new Uri(baseUri, rel); Assert.Equal(result, test); } [Fact] public void UriMakeRelative_DifferentHost_ReturnsSecondUri() { Uri baseUri = new Uri(@"http://host1/path/stuff"); Uri test = new Uri(@"http://host2/path/", UriKind.Absolute); Uri rel = baseUri.MakeRelativeUri(test); Assert.Equal(test, rel); Assert.True(Uri.IsWellFormedUriString(rel.ToString(), UriKind.Absolute), "Not well formed: " + rel); Uri result = new Uri(baseUri, rel); Assert.Equal(result, test); } [Fact] public void UriMakeRelative_DifferentPort_ReturnsSecondUri() { Uri baseUri = new Uri(@"http://host:1/path/stuff"); Uri test = new Uri(@"http://host:2/path/", UriKind.Absolute); Uri rel = baseUri.MakeRelativeUri(test); Assert.Equal(test, rel); Assert.True(Uri.IsWellFormedUriString(rel.ToString(), UriKind.Absolute), "Not well formed: " + rel); Uri result = new Uri(baseUri, rel); Assert.Equal(result, test); } [Fact] public void UriIsWellFormed_IPv6HostIriOn_True() { Assert.True(Uri.IsWellFormedUriString("http://[::1]/", UriKind.Absolute)); } public static IEnumerable<object[]> TestIsWellFormedUriStringData => new List<object[]> { // Test ImplicitFile/UNC new object[] { "c:\\directory\filename", false }, new object[] { "file://c:/directory/filename", false }, new object[] { "\\\\?\\UNC\\Server01\\user\\docs\\Letter.txt", false }, // Test Host new object[] { "http://www.contoso.com", true }, new object[] { "http://\u00E4.contos.com", true }, new object[] { "http://www.contos\u00E4.com", true }, new object[] { "http://www.contoso.com ", true }, new object[] { "http://\u00E4.contos.com ", true }, new object[] { "http:// www.contoso.com", false }, new object[] { "http:// \u00E4.contos.com", false }, new object[] { "http:// 
www.contos\u00E4.com", false }, new object[] { "http://www.contos o.com", false }, new object[] { "http://www.contos \u00E4.com", false }, // Test Path new object[] { "http://www.contoso.com/path???/file name", false }, new object[] { "http://www.contoso.com/\u00E4???/file name", false }, new object[] { "http:\\host/path/file", false }, new object[] { "http://www.contoso.com/a/sek http://test.com", false }, new object[] { "http://www.contoso.com/\u00E4/sek http://test.com", false }, new object[] { "http://www.contoso.com/ seka http://test.com", false }, new object[] { "http://www.contoso.com/ sek\u00E4 http://test.com", false }, new object[] { "http://www.contoso.com/ a sek http://test.com", false }, new object[] { "http://www.contoso.com/ \u00E4 sek http://test.com", false }, new object[] { "http://www.contoso.com/ \u00E4/", false }, new object[] { "http://www.contoso.com/ path/", false }, new object[] { "http://www.contoso.com/path", true }, new object[] { "http://www.contoso.com/\u00E4/", true }, new object[] { "http://www.contoso.com/path/#", true }, new object[] { "http://www.contoso.com/\u00E4/#", true }, new object[] { "http://www.contoso.com/path/# ", true }, new object[] { "http://www.contoso.com/\u00E4/# ", true }, new object[] { "http://www.contoso.com/path/ # ", false }, new object[] { "http://www.contoso.com/\u00E4/ # ", false }, new object[] { "http://www.contoso.com/path/ #", false }, new object[] { "http://www.contoso.com/\u00E4/ #", false }, new object[] { "http://www.contoso.com/path ", true }, new object[] { "http://www.contoso.com/\u00E4/ ", true }, new object[] { "http://www.contoso.com/path/\u00E4/path /", false }, new object[] { "http://www.contoso.com/path/\u00E4/path / ", false }, new object[] { "http://www.contoso.com/path/\u00E4/path/", true }, new object[] { "http://www.contoso.com/path/\u00E4 /path/", false }, new object[] { "http://www.contoso.com/path/\u00E4 /path/ ", false }, new object[] { "http://www.contoso.com/path/\u00E4/path/ \u00E4/", false }, // Test Query new object[] { "http://www.contoso.com/path?name", true }, new object[] { "http://www.contoso.com/path?\u00E4", true }, new object[] { "http://www.contoso.com/path?name ", true }, new object[] { "http://www.contoso.com/path?\u00E4 ", true }, new object[] { "http://www.contoso.com/path ?name ", false }, new object[] { "http://www.contoso.com/path ?\u00E4 ", false }, new object[] { "http://www.contoso.com/path?par=val?", true }, new object[] { "http://www.contoso.com/path?\u00E4=\u00E4?", true }, new object[] { "http://www.contoso.com/path? name ", false }, new object[] { "http://www.contoso.com/path? 
\u00E4 ", false }, new object[] { "http://www.contoso.com/path?p=", true }, new object[] { "http://www.contoso.com/path?\u00E4=", true }, new object[] { "http://www.contoso.com/path?p= ", true }, new object[] { "http://www.contoso.com/path?\u00E4= ", true }, new object[] { "http://www.contoso.com/path?p= val", false }, new object[] { "http://www.contoso.com/path?\u00E4= \u00E4", false }, new object[] { "http://www.contoso.com/path?par=value& par=value", false }, new object[] { "http://www.contoso.com/path?\u00E4=\u00E4& \u00E4=\u00E4", false }, // Test Fragment new object[] { "http://www.contoso.com/path?name#", true }, new object[] { "http://www.contoso.com/path?\u00E4#", true }, new object[] { "http://www.contoso.com/path?name# ", true }, new object[] { "http://www.contoso.com/path?\u00E4# ", true }, new object[] { "http://www.contoso.com/path?name#a", true }, new object[] { "http://www.contoso.com/path?\u00E4#\u00E4", true }, new object[] { "http://www.contoso.com/path?name#a ", true }, new object[] { "http://www.contoso.com/path?\u00E4#\u00E4 ", true }, new object[] { "http://www.contoso.com/path?name# a", false }, new object[] { "http://www.contoso.com/path?\u00E4# \u00E4", false }, // Test Path+Query new object[] { "http://www.contoso.com/path? a ", false }, new object[] { "http://www.contoso.com/\u00E4? \u00E4 ", false }, new object[] { "http://www.contoso.com/a?val", true }, new object[] { "http://www.contoso.com/\u00E4?\u00E4", true }, new object[] { "http://www.contoso.com/path /path?par=val", false }, new object[] { "http://www.contoso.com/\u00E4 /\u00E4?\u00E4=\u00E4", false }, // Test Path+Query+Fragment new object[] { "http://www.contoso.com/path?a#a", true }, new object[] { "http://www.contoso.com/\u00E4?\u00E4#\u00E4", true }, new object[] { "http://www.contoso.com/path?par=val#a ", true }, new object[] { "http://www.contoso.com/\u00E4?\u00E4=\u00E4#\u00E4 ", true }, new object[] { "http://www.contoso.com/path?val#", true }, new object[] { "http://www.contoso.com/\u00E4?\u00E4#", true }, new object[] { "http://www.contoso.com/path?val#?val", true }, new object[] { "http://www.contoso.com/\u00E4?\u00E4#?\u00E4", true }, new object[] { "http://www.contoso.com/path?val #", false }, new object[] { "http://www.contoso.com/\u00E4?\u00E4 #", false }, new object[] { "http://www.contoso.com/path?val# val", false }, new object[] { "http://www.contoso.com/\u00E4?\u00E4# \u00E4", false }, new object[] { "http://www.contoso.com/path?val# val ", false }, new object[] { "http://www.contoso.com/\u00E4?\u00E4# \u00E4 ", false }, new object[] { "http://www.contoso.com/path?val#val ", true }, new object[] { "http://www.contoso.com/\u00E4?\u00E4#\u00E4 ", true }, new object[] { "http://www.contoso.com/ path?a#a", false }, new object[] { "http://www.contoso.com/ \u00E4?\u00E4#\u00E4", false }, new object[] { "http://www.contoso.com/ path?a #a", false }, new object[] { "http://www.contoso.com/ \u00E4?\u00E4 #\u00E4", false }, new object[] { "http://www.contoso.com/ path?a #a ", false }, new object[] { "http://www.contoso.com/ \u00E4?\u00E4 #\u00E4 ", false }, new object[] { "http://www.contoso.com/path?a# a ", false }, new object[] { "http://www.contoso.com/path?\u00E4# \u00E4 ", false }, new object[] { "http://www.contoso.com/path?a#a?a", true }, new object[] { "http://www.contoso.com/\u00E4?\u00E4#u00E4?\u00E4", true }, // Sample in "private unsafe Check CheckCanonical(char* str, ref ushort idx, ushort end, char delim)" code comments new object[] { "http://www.contoso.com/\u00E4/ path2/ 
param=val", false }, new object[] { "http://www.contoso.com/\u00E4? param=val", false }, new object[] { "http://www.contoso.com/\u00E4?param=val# fragment", false }, }; [Theory] [MemberData(nameof(TestIsWellFormedUriStringData))] public static void TestIsWellFormedUriString(string uriString, bool expected) { Assert.Equal(expected, Uri.IsWellFormedUriString(uriString, UriKind.RelativeOrAbsolute)); } public static IEnumerable<object[]> UriIsWellFormedUnwiseStringData => new List<object[]> { // escaped new object[] { "https://www.contoso.com/?a=%7B%7C%7D&b=%E2%80%99", true }, new object[] { "https://www.contoso.com/?a=%7B%7C%7D%E2%80%99", true }, // unescaped new object[] { "https://www.contoso.com/?a=}", false }, new object[] { "https://www.contoso.com/?a=|", false }, new object[] { "https://www.contoso.com/?a={", false }, // not query new object[] { "https://www.%7Bcontoso.com/", false }, new object[] { "http%7Bs://www.contoso.com/", false }, new object[] { "https://www.contoso.com%7B/", false }, new object[] { "htt%7Cps://www.contoso.com/", false }, new object[] { "https://www.con%7Ctoso.com/", false }, new object[] { "https://www.contoso.com%7C/", false }, new object[] { "htt%7Dps://www.contoso.com/", false }, new object[] { "https://www.con%7Dtoso.com/", false }, new object[] { "https://www.contoso.com%7D/", false }, new object[] { "htt{ps://www.contoso.com/", false }, new object[] { "https://www.con{toso.com/", false }, new object[] { "https://www.contoso.com{/", false }, new object[] { "htt|ps://www.contoso.com/", false }, new object[] { "https://www.con|toso.com/", false }, new object[] { "https://www.contoso.com|/", false }, new object[] { "htt}ps://www.contoso.com/", false }, new object[] { "https://www.con}toso.com/", false }, new object[] { "https://www.contoso.com}/", false }, }; [Theory] [MemberData(nameof(UriIsWellFormedUnwiseStringData))] public void UriIsWellFormed_AbsoluteUnicodeWithUnwise_Success(string uriString, bool expected) { Assert.Equal(expected, Uri.IsWellFormedUriString(uriString, UriKind.Absolute)); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using Xunit; namespace System.PrivateUri.Tests { /// <summary> /// Summary description for UriIsWellFormedUriStringTest /// </summary> public class UriIsWellFormedUriStringTest { [Fact] public void UriIsWellFormed_AbsoluteWellFormed_Success() { Assert.True(Uri.IsWellFormedUriString("http://foo.com/bad:url", UriKind.Absolute)); } [Fact] public void UriIsWellFormed_RelativeWellFormed_Success() { Assert.True(Uri.IsWellFormedUriString("/path/file?Query", UriKind.Relative)); } [Fact] public void UriIsWellFormed_RelativeWithColon_Failure() { Assert.False(Uri.IsWellFormedUriString("http://foo", UriKind.Relative)); } [Fact] public void UriIsWellFormed_RelativeWithPercentAndColon_Failure() { Assert.False(Uri.IsWellFormedUriString("bad%20http://foo", UriKind.Relative)); } [Fact] public void UriIsWellFormed_NewRelativeRegisteredAbsolute_Throws() { Assert.ThrowsAny<FormatException>(() => new Uri("http://foo", UriKind.Relative)); } [Fact] public void UriIsWellFormed_NewAbsoluteUnregisteredAsRelative_Throws() { Assert.ThrowsAny<FormatException>(() => new Uri("any://foo", UriKind.Relative)); } [Fact] public void UriIsWellFormed_NewRelativeWithKnownSchemeAndQuery_SuccessButNotWellFormed() { Uri test = new Uri("http:?foo", UriKind.Relative); Assert.False(Uri.IsWellFormedUriString(test.ToString(), UriKind.Relative), "Not well formed"); Assert.False(Uri.IsWellFormedUriString(test.ToString(), UriKind.Absolute), "Should not be well formed"); Assert.True(Uri.TryCreate(test.ToString(), UriKind.Relative, out test), "TryCreate Mismatch"); Uri result = new Uri(new Uri("http://host.com"), test); Assert.True(Uri.IsWellFormedUriString(result.ToString(), UriKind.Absolute), "Not well formed"); } [Fact] public void UriIsWellFormed_NewRelativeWithUnknownSchemeAndQuery_Throws() { Assert.False(Uri.TryCreate("any:?foo", UriKind.Relative, out _), "TryCreate should have Failed"); // The generic parser allows this kind of absolute Uri, where the http parser does not Assert.ThrowsAny<FormatException>(() => new Uri("any:?foo", UriKind.Relative)); } [Fact] public void UriIsWellFormed_TryCreateNewRelativeWithColon_Failure() { Assert.False(Uri.TryCreate("http://foo", UriKind.Relative, out _)); } // App-compat - A colon in the first segment of a relative Uri is invalid, but we cannot reject it. 
[Fact] public void UriIsWellFormed_TryCreateNewRelativeWithPercentAndColon_Success() { string input = "bad%20http://foo"; Assert.True(Uri.TryCreate(input, UriKind.Relative, out Uri test)); Assert.False(test.IsWellFormedOriginalString()); Assert.False(Uri.IsWellFormedUriString(input, UriKind.Relative)); Assert.False(Uri.IsWellFormedUriString(input, UriKind.RelativeOrAbsolute)); Assert.False(Uri.IsWellFormedUriString(input, UriKind.Absolute)); } [Fact] public void UriIsWellFormed_AbsoluteWithColonToRelative_AppendsDotSlash() { Uri baseUri = new Uri("https://base.com/path/stuff"); Uri test = new Uri("https://base.com/path/hi:there/", UriKind.Absolute); Uri rel = baseUri.MakeRelativeUri(test); Assert.True(Uri.IsWellFormedUriString(rel.ToString(), UriKind.Relative), "Not well formed: " + rel); Uri result = new Uri(baseUri, rel); Assert.Equal(test, result); //"Transitivity failure" Assert.True(string.CompareOrdinal(rel.ToString(), 0, "./", 0, 2) == 0, "Cannot have colon in first segment, must append ./"); } [Fact] public void UriIsWellFormed_AbsoluteWithPercentAndColonToRelative_AppendsDotSlash() { Uri baseUri = new Uri("https://base.com/path/stuff"); Uri test = new Uri("https://base.com/path/h%20i:there/", UriKind.Absolute); Uri rel = baseUri.MakeRelativeUri(test); Assert.True(Uri.IsWellFormedUriString(rel.ToString(), UriKind.Relative), "Not well formed: " + rel); Uri result = new Uri(baseUri, rel); Assert.Equal(test, result); //"Transitivity failure" Assert.True(string.CompareOrdinal(rel.ToString(), 0, "./", 0, 2) == 0, "Cannot have colon in first segment, must append ./"); } [Fact] public void UriMakeRelative_ImplicitFileCommonBaseWithColon_AppendsDotSlash() { Uri baseUri = new Uri(@"c:/base/path/stuff"); Uri test = new Uri(@"c:/base/path/hi:there/", UriKind.Absolute); Uri rel = baseUri.MakeRelativeUri(test); Assert.True(Uri.IsWellFormedUriString(rel.ToString(), UriKind.Relative), "Not well formed: " + rel); Uri result = new Uri(baseUri, rel); Assert.Equal(test.LocalPath, result.LocalPath); // "Transitivity failure" Assert.True(string.CompareOrdinal(rel.ToString(), 0, "./", 0, 2) == 0, "Cannot have colon in first segment, must append ./"); } [Fact] public void UriMakeRelative_ImplicitFileDifferentBaseWithColon_ReturnsSecondUri() { Uri baseUri = new Uri(@"c:/base/path/stuff"); Uri test = new Uri(@"d:/base/path/hi:there/", UriKind.Absolute); Uri rel = baseUri.MakeRelativeUri(test); Uri result = new Uri(baseUri, rel); Assert.Equal(test.LocalPath, result.LocalPath); //"Transitivity failure" } [Fact] public void UriMakeRelative_ExplicitFileDifferentBaseWithColon_ReturnsSecondUri() { Uri baseUri = new Uri(@"file://c:/stuff"); Uri test = new Uri(@"file://d:/hi:there/", UriKind.Absolute); Uri rel = baseUri.MakeRelativeUri(test); Assert.False(rel.IsAbsoluteUri, "Result should be relative"); Assert.Equal("d:/hi:there/", rel.ToString()); Uri result = new Uri(baseUri, rel); Assert.Equal(test.LocalPath, result.LocalPath); // "Transitivity failure" Assert.Equal(test.ToString(), result.ToString()); // "Transitivity failure" } [Fact] public void UriMakeRelative_ExplicitUncFileVsDosFile_ReturnsSecondPath() { Uri baseUri = new Uri(@"file:///u:/stuff"); Uri test = new Uri(@"file:///unc/hi:there/", UriKind.Absolute); Uri rel = baseUri.MakeRelativeUri(test); Uri result = new Uri(baseUri, rel); // This is a known oddity when mix and matching Unc & dos paths in this order. // The other way works as expected. 
Assert.Equal("file:///u:/unc/hi:there/", result.ToString()); } [Fact] public void UriMakeRelative_ExplicitDosFileWithHost_ReturnsSecondPath() { Uri baseUri = new Uri(@"file://host/u:/stuff"); Uri test = new Uri(@"file://host/unc/hi:there/", UriKind.Absolute); Uri rel = baseUri.MakeRelativeUri(test); Uri result = new Uri(baseUri, rel); Assert.Equal(test.LocalPath, result.LocalPath); // "Transitivity failure" } [Fact] public void UriMakeRelative_ExplicitDosFileSecondWithHost_ReturnsSecondPath() { Uri baseUri = new Uri(@"file://host/unc/stuff"); Uri test = new Uri(@"file://host/u:/hi:there/", UriKind.Absolute); Uri rel = baseUri.MakeRelativeUri(test); Uri result = new Uri(baseUri, rel); Assert.Equal(test.LocalPath, result.LocalPath); //"Transitivity failure" } [Fact] public void UriMakeRelative_ExplicitDosFileVsUncFile_ReturnsSecondUri() { Uri baseUri = new Uri(@"file:///unc/stuff"); Uri test = new Uri(@"file:///u:/hi:there/", UriKind.Absolute); Uri rel = baseUri.MakeRelativeUri(test); Uri result = new Uri(baseUri, rel); Assert.Equal(test.LocalPath, result.LocalPath); //"Transitivity failure" } [Fact] public void UriMakeRelative_ExplicitDosFileContainingImplicitDosPath_AddsDotSlash() { Uri baseUri = new Uri(@"file:///u:/stuff/file"); Uri test = new Uri(@"file:///u:/stuff/h:there/", UriKind.Absolute); Uri rel = baseUri.MakeRelativeUri(test); Uri result = new Uri(baseUri, rel); Assert.Equal(test.LocalPath, result.LocalPath); //"Transitivity failure" } [Fact] public void UriMakeRelative_DifferentSchemes_ReturnsSecondUri() { Uri baseUri = new Uri(@"http://base/path/stuff"); Uri test = new Uri(@"https://base/path/", UriKind.Absolute); Uri rel = baseUri.MakeRelativeUri(test); Assert.Equal(test, rel); Assert.True(Uri.IsWellFormedUriString(rel.ToString(), UriKind.Absolute), "Not well formed: " + rel); Uri result = new Uri(baseUri, rel); Assert.Equal(result, test); } [Fact] public void UriMakeRelative_DifferentHost_ReturnsSecondUri() { Uri baseUri = new Uri(@"http://host1/path/stuff"); Uri test = new Uri(@"http://host2/path/", UriKind.Absolute); Uri rel = baseUri.MakeRelativeUri(test); Assert.Equal(test, rel); Assert.True(Uri.IsWellFormedUriString(rel.ToString(), UriKind.Absolute), "Not well formed: " + rel); Uri result = new Uri(baseUri, rel); Assert.Equal(result, test); } [Fact] public void UriMakeRelative_DifferentPort_ReturnsSecondUri() { Uri baseUri = new Uri(@"http://host:1/path/stuff"); Uri test = new Uri(@"http://host:2/path/", UriKind.Absolute); Uri rel = baseUri.MakeRelativeUri(test); Assert.Equal(test, rel); Assert.True(Uri.IsWellFormedUriString(rel.ToString(), UriKind.Absolute), "Not well formed: " + rel); Uri result = new Uri(baseUri, rel); Assert.Equal(result, test); } [Fact] public void UriIsWellFormed_IPv6HostIriOn_True() { Assert.True(Uri.IsWellFormedUriString("http://[::1]/", UriKind.Absolute)); } public static IEnumerable<object[]> TestIsWellFormedUriStringData => new List<object[]> { // Test ImplicitFile/UNC new object[] { "c:\\directory\filename", false }, new object[] { "file://c:/directory/filename", false }, new object[] { "\\\\?\\UNC\\Server01\\user\\docs\\Letter.txt", false }, // Test Host new object[] { "http://www.contoso.com", true }, new object[] { "http://\u00E4.contos.com", true }, new object[] { "http://www.contos\u00E4.com", true }, new object[] { "http://www.contoso.com ", true }, new object[] { "http://\u00E4.contos.com ", true }, new object[] { "http:// www.contoso.com", false }, new object[] { "http:// \u00E4.contos.com", false }, new object[] { "http:// 
www.contos\u00E4.com", false }, new object[] { "http://www.contos o.com", false }, new object[] { "http://www.contos \u00E4.com", false }, // Test Path new object[] { "http://www.contoso.com/path???/file name", false }, new object[] { "http://www.contoso.com/\u00E4???/file name", false }, new object[] { "http:\\host/path/file", false }, new object[] { "http://www.contoso.com/a/sek http://test.com", false }, new object[] { "http://www.contoso.com/\u00E4/sek http://test.com", false }, new object[] { "http://www.contoso.com/ seka http://test.com", false }, new object[] { "http://www.contoso.com/ sek\u00E4 http://test.com", false }, new object[] { "http://www.contoso.com/ a sek http://test.com", false }, new object[] { "http://www.contoso.com/ \u00E4 sek http://test.com", false }, new object[] { "http://www.contoso.com/ \u00E4/", false }, new object[] { "http://www.contoso.com/ path/", false }, new object[] { "http://www.contoso.com/path", true }, new object[] { "http://www.contoso.com/\u00E4/", true }, new object[] { "http://www.contoso.com/path/#", true }, new object[] { "http://www.contoso.com/\u00E4/#", true }, new object[] { "http://www.contoso.com/path/# ", true }, new object[] { "http://www.contoso.com/\u00E4/# ", true }, new object[] { "http://www.contoso.com/path/ # ", false }, new object[] { "http://www.contoso.com/\u00E4/ # ", false }, new object[] { "http://www.contoso.com/path/ #", false }, new object[] { "http://www.contoso.com/\u00E4/ #", false }, new object[] { "http://www.contoso.com/path ", true }, new object[] { "http://www.contoso.com/\u00E4/ ", true }, new object[] { "http://www.contoso.com/path/\u00E4/path /", false }, new object[] { "http://www.contoso.com/path/\u00E4/path / ", false }, new object[] { "http://www.contoso.com/path/\u00E4/path/", true }, new object[] { "http://www.contoso.com/path/\u00E4 /path/", false }, new object[] { "http://www.contoso.com/path/\u00E4 /path/ ", false }, new object[] { "http://www.contoso.com/path/\u00E4/path/ \u00E4/", false }, // Test Query new object[] { "http://www.contoso.com/path?name", true }, new object[] { "http://www.contoso.com/path?\u00E4", true }, new object[] { "http://www.contoso.com/path?name ", true }, new object[] { "http://www.contoso.com/path?\u00E4 ", true }, new object[] { "http://www.contoso.com/path ?name ", false }, new object[] { "http://www.contoso.com/path ?\u00E4 ", false }, new object[] { "http://www.contoso.com/path?par=val?", true }, new object[] { "http://www.contoso.com/path?\u00E4=\u00E4?", true }, new object[] { "http://www.contoso.com/path? name ", false }, new object[] { "http://www.contoso.com/path? 
\u00E4 ", false }, new object[] { "http://www.contoso.com/path?p=", true }, new object[] { "http://www.contoso.com/path?\u00E4=", true }, new object[] { "http://www.contoso.com/path?p= ", true }, new object[] { "http://www.contoso.com/path?\u00E4= ", true }, new object[] { "http://www.contoso.com/path?p= val", false }, new object[] { "http://www.contoso.com/path?\u00E4= \u00E4", false }, new object[] { "http://www.contoso.com/path?par=value& par=value", false }, new object[] { "http://www.contoso.com/path?\u00E4=\u00E4& \u00E4=\u00E4", false }, // Test Fragment new object[] { "http://www.contoso.com/path?name#", true }, new object[] { "http://www.contoso.com/path?\u00E4#", true }, new object[] { "http://www.contoso.com/path?name# ", true }, new object[] { "http://www.contoso.com/path?\u00E4# ", true }, new object[] { "http://www.contoso.com/path?name#a", true }, new object[] { "http://www.contoso.com/path?\u00E4#\u00E4", true }, new object[] { "http://www.contoso.com/path?name#a ", true }, new object[] { "http://www.contoso.com/path?\u00E4#\u00E4 ", true }, new object[] { "http://www.contoso.com/path?name# a", false }, new object[] { "http://www.contoso.com/path?\u00E4# \u00E4", false }, // Test Path+Query new object[] { "http://www.contoso.com/path? a ", false }, new object[] { "http://www.contoso.com/\u00E4? \u00E4 ", false }, new object[] { "http://www.contoso.com/a?val", true }, new object[] { "http://www.contoso.com/\u00E4?\u00E4", true }, new object[] { "http://www.contoso.com/path /path?par=val", false }, new object[] { "http://www.contoso.com/\u00E4 /\u00E4?\u00E4=\u00E4", false }, // Test Path+Query+Fragment new object[] { "http://www.contoso.com/path?a#a", true }, new object[] { "http://www.contoso.com/\u00E4?\u00E4#\u00E4", true }, new object[] { "http://www.contoso.com/path?par=val#a ", true }, new object[] { "http://www.contoso.com/\u00E4?\u00E4=\u00E4#\u00E4 ", true }, new object[] { "http://www.contoso.com/path?val#", true }, new object[] { "http://www.contoso.com/\u00E4?\u00E4#", true }, new object[] { "http://www.contoso.com/path?val#?val", true }, new object[] { "http://www.contoso.com/\u00E4?\u00E4#?\u00E4", true }, new object[] { "http://www.contoso.com/path?val #", false }, new object[] { "http://www.contoso.com/\u00E4?\u00E4 #", false }, new object[] { "http://www.contoso.com/path?val# val", false }, new object[] { "http://www.contoso.com/\u00E4?\u00E4# \u00E4", false }, new object[] { "http://www.contoso.com/path?val# val ", false }, new object[] { "http://www.contoso.com/\u00E4?\u00E4# \u00E4 ", false }, new object[] { "http://www.contoso.com/path?val#val ", true }, new object[] { "http://www.contoso.com/\u00E4?\u00E4#\u00E4 ", true }, new object[] { "http://www.contoso.com/ path?a#a", false }, new object[] { "http://www.contoso.com/ \u00E4?\u00E4#\u00E4", false }, new object[] { "http://www.contoso.com/ path?a #a", false }, new object[] { "http://www.contoso.com/ \u00E4?\u00E4 #\u00E4", false }, new object[] { "http://www.contoso.com/ path?a #a ", false }, new object[] { "http://www.contoso.com/ \u00E4?\u00E4 #\u00E4 ", false }, new object[] { "http://www.contoso.com/path?a# a ", false }, new object[] { "http://www.contoso.com/path?\u00E4# \u00E4 ", false }, new object[] { "http://www.contoso.com/path?a#a?a", true }, new object[] { "http://www.contoso.com/\u00E4?\u00E4#u00E4?\u00E4", true }, // Sample in "private unsafe Check CheckCanonical(char* str, ref ushort idx, ushort end, char delim)" code comments new object[] { "http://www.contoso.com/\u00E4/ path2/ 
param=val", false }, new object[] { "http://www.contoso.com/\u00E4? param=val", false }, new object[] { "http://www.contoso.com/\u00E4?param=val# fragment", false }, }; [Theory] [MemberData(nameof(TestIsWellFormedUriStringData))] public static void TestIsWellFormedUriString(string uriString, bool expected) { Assert.Equal(expected, Uri.IsWellFormedUriString(uriString, UriKind.RelativeOrAbsolute)); } public static IEnumerable<object[]> UriIsWellFormedUnwiseStringData => new List<object[]> { // escaped new object[] { "https://www.contoso.com/?a=%7B%7C%7D&b=%E2%80%99", true }, new object[] { "https://www.contoso.com/?a=%7B%7C%7D%E2%80%99", true }, // unescaped new object[] { "https://www.contoso.com/?a=}", false }, new object[] { "https://www.contoso.com/?a=|", false }, new object[] { "https://www.contoso.com/?a={", false }, // not query new object[] { "https://www.%7Bcontoso.com/", false }, new object[] { "http%7Bs://www.contoso.com/", false }, new object[] { "https://www.contoso.com%7B/", false }, new object[] { "htt%7Cps://www.contoso.com/", false }, new object[] { "https://www.con%7Ctoso.com/", false }, new object[] { "https://www.contoso.com%7C/", false }, new object[] { "htt%7Dps://www.contoso.com/", false }, new object[] { "https://www.con%7Dtoso.com/", false }, new object[] { "https://www.contoso.com%7D/", false }, new object[] { "htt{ps://www.contoso.com/", false }, new object[] { "https://www.con{toso.com/", false }, new object[] { "https://www.contoso.com{/", false }, new object[] { "htt|ps://www.contoso.com/", false }, new object[] { "https://www.con|toso.com/", false }, new object[] { "https://www.contoso.com|/", false }, new object[] { "htt}ps://www.contoso.com/", false }, new object[] { "https://www.con}toso.com/", false }, new object[] { "https://www.contoso.com}/", false }, }; [Theory] [MemberData(nameof(UriIsWellFormedUnwiseStringData))] public void UriIsWellFormed_AbsoluteUnicodeWithUnwise_Success(string uriString, bool expected) { Assert.Equal(expected, Uri.IsWellFormedUriString(uriString, UriKind.Absolute)); } } }
-1
dotnet/runtime
66268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/coreclr/pal/src/thread/threadsusp.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*++ Module Name: threadsusp.cpp Abstract: Implementation of functions related to threads. Revision History: --*/ #include "pal/corunix.hpp" #include "pal/thread.hpp" #include "pal/mutex.hpp" #include "pal/seh.hpp" #include "pal/init.h" #include "pal/dbgmsg.h" #include <pthread.h> #include <unistd.h> #include <errno.h> #include <stddef.h> #include <sys/stat.h> #include <limits.h> #include <debugmacrosext.h> using namespace CorUnix; /* ------------------- Definitions ------------------------------*/ SET_DEFAULT_DEBUG_CHANNEL(THREAD); /* This code is written to the blocking pipe of a thread that was created in suspended state in order to resume it. */ CONST BYTE WAKEUPCODE=0x2A; // #define USE_GLOBAL_LOCK_FOR_SUSPENSION // Uncomment this define to use the global suspension lock. /* The global suspension lock can be used in place of each thread having its own suspension mutex or spinlock. The downside is that it restricts us to only performing one suspension or resumption in the PAL at a time. */ #ifdef USE_GLOBAL_LOCK_FOR_SUSPENSION static LONG g_ssSuspensionLock = 0; #endif /*++ Function: InternalSuspendNewThreadFromData On platforms where we use pipes for starting threads suspended, this function sets the blocking pipe for the thread and blocks until the wakeup code is written to the pipe by ResumeThread. --*/ PAL_ERROR CThreadSuspensionInfo::InternalSuspendNewThreadFromData( CPalThread *pThread ) { PAL_ERROR palError = NO_ERROR; AcquireSuspensionLock(pThread); pThread->suspensionInfo.SetSelfSusp(TRUE); ReleaseSuspensionLock(pThread); int pipe_descs[2]; int pipeRv = #if HAVE_PIPE2 pipe2(pipe_descs, O_CLOEXEC); #else pipe(pipe_descs); #endif // HAVE_PIPE2 if (pipeRv == -1) { ERROR("pipe() failed! error is %d (%s)\n", errno, strerror(errno)); return ERROR_NOT_ENOUGH_MEMORY; } #if !HAVE_PIPE2 fcntl(pipe_descs[0], F_SETFD, FD_CLOEXEC); // make pipe non-inheritable, if possible fcntl(pipe_descs[1], F_SETFD, FD_CLOEXEC); #endif // !HAVE_PIPE2 // [0] is the read end of the pipe, and [1] is the write end. pThread->suspensionInfo.SetBlockingPipe(pipe_descs[1]); pThread->SetStartStatus(TRUE); BYTE resume_code = 0; ssize_t read_ret; // Block until ResumeThread writes something to the pipe while ((read_ret = read(pipe_descs[0], &resume_code, sizeof(resume_code))) != sizeof(resume_code)) { if (read_ret != -1 || EINTR != errno) { // read might return 0 (with EAGAIN) if the other end of the pipe gets closed palError = ERROR_INTERNAL_ERROR; break; } } if (palError == NO_ERROR && resume_code != WAKEUPCODE) { // If we did read successfully but the byte didn't match WAKEUPCODE, we treat it as a failure. palError = ERROR_INTERNAL_ERROR; } if (palError == NO_ERROR) { AcquireSuspensionLock(pThread); pThread->suspensionInfo.SetSelfSusp(FALSE); ReleaseSuspensionLock(pThread); } // Close the pipes regardless of whether we were successful. close(pipe_descs[0]); close(pipe_descs[1]); return palError; } /*++ Function: ResumeThread See MSDN doc. 
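//
// Illustrative sketch (not part of the original source): the "start suspended"
// handshake above reduces to a pipe read that blocks until the resumer writes a
// single wakeup byte. A minimal standalone version of the waiting side, assuming
// <unistd.h> and <errno.h> as already included; WaitForWakeup is a hypothetical
// helper name used only for illustration.
//
static bool WaitForWakeup(int readFd, unsigned char expectedCode)
{
    unsigned char code = 0;
    ssize_t ret;
    // Retry reads interrupted by a signal; any other failed or short read is fatal.
    while ((ret = read(readFd, &code, sizeof(code))) != (ssize_t)sizeof(code))
    {
        if (ret != -1 || errno != EINTR)
        {
            return false;
        }
    }
    // The received byte must match the agreed wakeup code to count as a resume.
    return code == expectedCode;
}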
--*/ DWORD PALAPI ResumeThread( IN HANDLE hThread ) { PAL_ERROR palError; CPalThread *pthrResumer; DWORD dwSuspendCount = (DWORD)-1; PERF_ENTRY(ResumeThread); ENTRY("ResumeThread(hThread=%p)\n", hThread); pthrResumer = InternalGetCurrentThread(); palError = InternalResumeThread( pthrResumer, hThread, &dwSuspendCount ); if (NO_ERROR != palError) { pthrResumer->SetLastError(palError); dwSuspendCount = (DWORD) -1; } else { _ASSERT_MSG(dwSuspendCount != static_cast<DWORD>(-1), "InternalResumeThread returned success but dwSuspendCount did not change.\n"); } LOGEXIT("ResumeThread returns DWORD %u\n", dwSuspendCount); PERF_EXIT(ResumeThread); return dwSuspendCount; } /*++ Function: InternalResumeThread InternalResumeThread converts the handle of the target thread to a CPalThread, and passes both the resumer and target thread references to InternalResumeThreadFromData. A reference to the suspend count from the resumption attempt is passed back to the caller of this function. --*/ PAL_ERROR CorUnix::InternalResumeThread( CPalThread *pthrResumer, HANDLE hTargetThread, DWORD *pdwSuspendCount ) { PAL_ERROR palError = NO_ERROR; CPalThread *pthrTarget = NULL; IPalObject *pobjThread = NULL; palError = InternalGetThreadDataFromHandle( pthrResumer, hTargetThread, &pthrTarget, &pobjThread ); if (NO_ERROR == palError) { palError = pthrResumer->suspensionInfo.InternalResumeThreadFromData( pthrResumer, pthrTarget, pdwSuspendCount ); } if (NULL != pobjThread) { pobjThread->ReleaseReference(pthrResumer); } return palError; } /*++ Function: InternalResumeThreadFromData InternalResumeThreadFromData resumes the target thread. First, the suspension mutexes of the threads are acquired. Next, there's a check to ensure that the target thread was actually suspended. Finally, the resume attempt is made and the suspension mutexes are released. The suspend count of the target thread is passed back to the caller of this function. Note that ReleaseSuspensionLock(s) is called before hitting ASSERTs in error paths. Currently, this seems unnecessary since asserting within InternalResumeThreadFromData will not cause cleanup to occur. However, this may change since it would be preferable to perform cleanup. Thus, calls to release suspension locks remain in the error paths. --*/ PAL_ERROR CThreadSuspensionInfo::InternalResumeThreadFromData( CPalThread *pthrResumer, CPalThread *pthrTarget, DWORD *pdwSuspendCount ) { PAL_ERROR palError = NO_ERROR; int nWrittenBytes = -1; if (SignalHandlerThread == pthrTarget->GetThreadType()) { ASSERT("Attempting to resume the signal handling thread, which can never be suspended.\n"); palError = ERROR_INVALID_HANDLE; goto InternalResumeThreadFromDataExit; } // Acquire suspension mutex AcquireSuspensionLocks(pthrResumer, pthrTarget); // Check target thread's state to ensure it hasn't died. // Setting a thread's state to TS_DONE is protected by the // target's suspension mutex. if (pthrTarget->synchronizationInfo.GetThreadState() == TS_DONE) { palError = ERROR_INVALID_HANDLE; ReleaseSuspensionLocks(pthrResumer, pthrTarget); goto InternalResumeThreadFromDataExit; } // If this is a dummy thread, then it represents a process that was created with CREATE_SUSPENDED // and it should have a blocking pipe set. If GetBlockingPipe returns -1 for a dummy thread, then // something is wrong - either CREATE_SUSPENDED wasn't used or the process was already resumed. 
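//
// Usage sketch (illustrative only; ResumeChildSketch is a hypothetical caller):
// how the return value of ResumeThread is typically interpreted in this PAL.
// On success the previous suspend count comes back (1 for a thread woken through
// its blocking pipe); (DWORD)-1 signals failure, and the underlying PAL error,
// e.g. ERROR_BAD_COMMAND for a thread that was not suspended, is available via
// GetLastError().
//
static void ResumeChildSketch(HANDLE hThread)
{
    DWORD previous = ResumeThread(hThread);
    if (previous == (DWORD)-1)
    {
        ERROR("ResumeThread failed, last error %u\n", GetLastError());
        return;
    }
    TRACE("resumed, previous suspend count was %u\n", previous);
}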
if (pthrTarget->IsDummy() && -1 == pthrTarget->suspensionInfo.GetBlockingPipe()) { palError = ERROR_INVALID_HANDLE; ERROR("Tried to wake up dummy thread without a blocking pipe.\n"); ReleaseSuspensionLocks(pthrResumer, pthrTarget); goto InternalResumeThreadFromDataExit; } // If there is a blocking pipe on this thread, resume it by writing the wake up code to that pipe. if (-1 != pthrTarget->suspensionInfo.GetBlockingPipe()) { // If write() is interrupted by a signal before writing data, // it returns -1 and sets errno to EINTR. In this case, we // attempt the write() again. writeAgain: nWrittenBytes = write(pthrTarget->suspensionInfo.GetBlockingPipe(), &WAKEUPCODE, sizeof(WAKEUPCODE)); // The size of WAKEUPCODE is 1 byte. If write returns 0, we'll treat it as an error. if (sizeof(WAKEUPCODE) != nWrittenBytes) { // If we are here during process creation, this is most likely caused by the target // process dying before reaching this point and thus breaking the pipe. if (nWrittenBytes == -1 && EPIPE == errno) { palError = ERROR_INVALID_HANDLE; ReleaseSuspensionLocks(pthrResumer, pthrTarget); ERROR("Write failed with EPIPE\n"); goto InternalResumeThreadFromDataExit; } else if (nWrittenBytes == 0 || (nWrittenBytes == -1 && EINTR == errno)) { TRACE("write() failed with EINTR; re-attempting write\n"); goto writeAgain; } else { // Some other error occurred; need to release suspension mutexes before leaving ResumeThread. palError = ERROR_INTERNAL_ERROR; ReleaseSuspensionLocks(pthrResumer, pthrTarget); ASSERT("Write() failed; error is %d (%s)\n", errno, strerror(errno)); goto InternalResumeThreadFromDataExit; } } // Reset blocking pipe to -1 since we're done using it. pthrTarget->suspensionInfo.SetBlockingPipe(-1); ReleaseSuspensionLocks(pthrResumer, pthrTarget); goto InternalResumeThreadFromDataExit; } else { *pdwSuspendCount = 0; palError = ERROR_BAD_COMMAND; } InternalResumeThreadFromDataExit: if (NO_ERROR == palError) { *pdwSuspendCount = 1; } return palError; } /*++ Function: TryAcquireSuspensionLock TryAcquireSuspensionLock is a utility function that tries to acquire a thread's suspension mutex or spinlock. If it succeeds, the function returns TRUE. Otherwise, it returns FALSE. This function is used in AcquireSuspensionLocks. Note that the global lock cannot be acquired in this function since it makes no sense to do so. A thread holding the global lock is the only thread that can perform suspend or resume operations so it doesn't need to acquire a second lock. --*/ BOOL CThreadSuspensionInfo::TryAcquireSuspensionLock( CPalThread* pthrTarget ) { int iPthreadRet = 0; #if DEADLOCK_WHEN_THREAD_IS_SUSPENDED_WHILE_BLOCKED_ON_MUTEX { iPthreadRet = SPINLOCKTryAcquire(pthrTarget->suspensionInfo.GetSuspensionSpinlock()); } #else // DEADLOCK_WHEN_THREAD_IS_SUSPENDED_WHILE_BLOCKED_ON_MUTEX { iPthreadRet = pthread_mutex_trylock(pthrTarget->suspensionInfo.GetSuspensionMutex()); _ASSERT_MSG(iPthreadRet == 0 || iPthreadRet == EBUSY, "pthread_mutex_trylock returned %d\n", iPthreadRet); } #endif // DEADLOCK_WHEN_THREAD_IS_SUSPENDED_WHILE_BLOCKED_ON_MUTEX // If iPthreadRet is 0, lock acquisition was successful. Otherwise, it failed. return (iPthreadRet == 0); } /*++ Function: AcquireSuspensionLock AcquireSuspensionLock acquires a thread's suspension mutex or spinlock. If USE_GLOBAL_LOCK_FOR_SUSPENSION is defined, it will acquire the global lock. A thread in this function blocks until it acquires its lock, unlike in TryAcquireSuspensionLock. 
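//
// Illustrative helper (not in the original file): the wakeup write above retries
// writes interrupted by a signal and treats EPIPE as "the target died and broke
// the pipe". The same policy as a small standalone function, assuming <unistd.h>
// and <errno.h>; the names are hypothetical.
//
enum WakeupWriteResult { WakeupSent, WakeupPeerGone, WakeupFailed };

static WakeupWriteResult WriteWakeupByte(int fd, unsigned char code)
{
    for (;;)
    {
        ssize_t written = write(fd, &code, sizeof(code));
        if (written == (ssize_t)sizeof(code))
        {
            return WakeupSent;      // the single wakeup byte went through
        }
        if (written == -1 && errno == EPIPE)
        {
            return WakeupPeerGone;  // read end closed: the target is gone
        }
        if (written == 0 || (written == -1 && errno == EINTR))
        {
            continue;               // interrupted or nothing written: retry
        }
        return WakeupFailed;        // any other error is unexpected
    }
}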
--*/ void CThreadSuspensionInfo::AcquireSuspensionLock( CPalThread* pthrCurrent ) { #ifdef USE_GLOBAL_LOCK_FOR_SUSPENSION { SPINLOCKAcquire(&g_ssSuspensionLock, 0); } #else // USE_GLOBAL_LOCK_FOR_SUSPENSION { #if DEADLOCK_WHEN_THREAD_IS_SUSPENDED_WHILE_BLOCKED_ON_MUTEX { SPINLOCKAcquire(&pthrCurrent->suspensionInfo.m_nSpinlock, 0); } #else // DEADLOCK_WHEN_THREAD_IS_SUSPENDED_WHILE_BLOCKED_ON_MUTEX { INDEBUG(int iPthreadError = ) pthread_mutex_lock(&pthrCurrent->suspensionInfo.m_ptmSuspmutex); _ASSERT_MSG(iPthreadError == 0, "pthread_mutex_lock returned %d\n", iPthreadError); } #endif // DEADLOCK_WHEN_THREAD_IS_SUSPENDED_WHILE_BLOCKED_ON_MUTEX } #endif // USE_GLOBAL_LOCK_FOR_SUSPENSION } /*++ Function: ReleaseSuspensionLock ReleaseSuspensionLock is a function that releases a thread's suspension mutex or spinlock. If USE_GLOBAL_LOCK_FOR_SUSPENSION is defined, it will release the global lock. --*/ void CThreadSuspensionInfo::ReleaseSuspensionLock( CPalThread* pthrCurrent ) { #ifdef USE_GLOBAL_LOCK_FOR_SUSPENSION { SPINLOCKRelease(&g_ssSuspensionLock); } #else // USE_GLOBAL_LOCK_FOR_SUSPENSION { #if DEADLOCK_WHEN_THREAD_IS_SUSPENDED_WHILE_BLOCKED_ON_MUTEX { SPINLOCKRelease(&pthrCurrent->suspensionInfo.m_nSpinlock); } #else // DEADLOCK_WHEN_THREAD_IS_SUSPENDED_WHILE_BLOCKED_ON_MUTEX { INDEBUG(int iPthreadError = ) pthread_mutex_unlock(&pthrCurrent->suspensionInfo.m_ptmSuspmutex); _ASSERT_MSG(iPthreadError == 0, "pthread_mutex_unlock returned %d\n", iPthreadError); } #endif // DEADLOCK_WHEN_THREAD_IS_SUSPENDED_WHILE_BLOCKED_ON_MUTEX } #endif // USE_GLOBAL_LOCK_FOR_SUSPENSION } /*++ Function: AcquireSuspensionLocks AcquireSuspensionLocks is used to acquire the suspension locks of a suspender (or resumer) and target thread. The thread will perform a blocking call to acquire its own suspension lock and will then try to acquire the target thread's lock without blocking. If it fails to acquire the target's lock, it releases its own lock and the thread will try to acquire both locks again. The key is that both locks must be acquired together. Originally, only blocking calls were used to acquire the suspender and the target lock. However, this was problematic since a thread could acquire its own lock and then block on acquiring the target lock. In the meantime, the target could have already acquired its own lock and be attempting to suspend the suspender thread. This clearly causes deadlock. A second approach used locking hierarchies, where locks were acquired use thread id ordering. This was better but suffered from the scenario where thread A acquires thread B's suspension mutex first. In the meantime, thread C acquires thread A's suspension mutex and its own. Thus, thread A is suspended while holding thread B's mutex. This is problematic if thread C now wants to suspend thread B. The issue here is that a thread can be suspended while holding someone else's mutex but not holding its own. In the end, the correct approach is to always acquire your suspension mutex first. This prevents you from being suspended while holding the target's mutex. Then, attempt to acquire the target's mutex. If the mutex cannot be acquired, release your own and try again. This all or nothing approach is the safest and avoids nasty race conditions. If USE_GLOBAL_LOCK_FOR_SUSPENSION is defined, the calling thread will acquire the global lock when possible. 
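//
// Illustrative sketch of the all-or-nothing protocol described above, written
// against plain pthread mutexes rather than CPalThread members (the function
// and parameter names are hypothetical; assumes <pthread.h> and <sched.h>).
// The key property: a thread never blocks on the target's lock while already
// holding its own, so it cannot be suspended while holding only someone else's
// suspension lock.
//
static void AcquireBothSuspensionLocksSketch(pthread_mutex_t* own,
                                             pthread_mutex_t* target)
{
    for (;;)
    {
        pthread_mutex_lock(own);                  // blocking: take my own lock first
        if (pthread_mutex_trylock(target) == 0)   // non-blocking: try the target's lock
        {
            return;                               // acquired both together
        }
        pthread_mutex_unlock(own);                // back off: release my own lock
        sched_yield();                            // give the other thread a chance
    }
}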
--*/ VOID CThreadSuspensionInfo::AcquireSuspensionLocks( CPalThread *pthrSuspender, CPalThread *pthrTarget ) { BOOL fReacquire = FALSE; #ifdef USE_GLOBAL_LOCK_FOR_SUSPENSION AcquireSuspensionLock(pthrSuspender); #else // USE_GLOBAL_LOCK_FOR_SUSPENSION do { fReacquire = FALSE; AcquireSuspensionLock(pthrSuspender); if (!TryAcquireSuspensionLock(pthrTarget)) { // pthread_mutex_trylock returned EBUSY so release the first lock and try again. ReleaseSuspensionLock(pthrSuspender); fReacquire = TRUE; sched_yield(); } } while (fReacquire); #endif // USE_GLOBAL_LOCK_FOR_SUSPENSION // Whenever the native implementation for the wait subsystem's thread // blocking requires a lock as protection (as pthread conditions do with // the associated mutex), we need to grab that lock to prevent the target // thread from being suspended while holding the lock. // Failing to do so can lead to a multiple threads deadlocking such as the // one described in VSW 363793. // In general, in similar scenarios, we need to grab the protecting lock // every time suspension safety/unsafety is unbalanced on the two sides // using the same condition (or any other native blocking support which // needs an associated native lock), i.e. when either the signaling // thread(s) is(are) signaling from an unsafe area and the waiting // thread(s) is(are) waiting from a safe one, or vice versa (the scenario // described in VSW 363793 is a good example of the first type of // unbalanced suspension safety/unsafety). // Instead, whenever signaling and waiting sides are both marked safe or // unsafe, the deadlock cannot take place since either the suspending // thread will suspend them anyway (regardless of the native lock), or it // won't suspend any of them, since they are both marked unsafe. // Such a balanced scenario applies, for instance, to critical sections // where depending on whether the target CS is internal or not, both the // signaling and the waiting side will access the mutex/condition from // respectively an unsafe or safe region. pthrTarget->AcquireNativeWaitLock(); } /*++ Function: ReleaseSuspensionLocks ReleaseSuspensionLocks releases both thread's suspension mutexes. Note that the locks are released in the opposite order they're acquired. This prevents a suspending or resuming thread from being suspended while holding the target's lock. If USE_GLOBAL_LOCK_FOR_SUSPENSION is defined, it simply releases the global lock. --*/ VOID CThreadSuspensionInfo::ReleaseSuspensionLocks( CPalThread *pthrSuspender, CPalThread *pthrTarget ) { // See comment in AcquireSuspensionLocks pthrTarget->ReleaseNativeWaitLock(); #ifdef USE_GLOBAL_LOCK_FOR_SUSPENSION ReleaseSuspensionLock(pthrSuspender); #else // USE_GLOBAL_LOCK_FOR_SUSPENSION ReleaseSuspensionLock(pthrTarget); ReleaseSuspensionLock(pthrSuspender); #endif // USE_GLOBAL_LOCK_FOR_SUSPENSION } /*++ Function: PostOnSuspendSemaphore PostOnSuspendSemaphore is a utility function for a thread to post on its POSIX or SysV suspension semaphore. --*/ void CThreadSuspensionInfo::PostOnSuspendSemaphore() { #if USE_POSIX_SEMAPHORES if (sem_post(&m_semSusp) == -1) { ASSERT("sem_post returned -1 and set errno to %d (%s)\n", errno, strerror(errno)); } #elif USE_SYSV_SEMAPHORES if (semop(m_nSemsuspid, &m_sbSempost, 1) == -1) { ASSERT("semop - post returned -1 and set errno to %d (%s)\n", errno, strerror(errno)); } #elif USE_PTHREAD_CONDVARS int status; // The suspending thread may not have entered the wait yet, in which case the cond var // signal below will be a no-op. 
To prevent the race condition we set m_fSuspended to // TRUE first (which the suspender will take as an indication that no wait is required). // But the setting of the flag and the signal must appear atomic to the suspender (as // reading the flag and potentially waiting must appear to us) to avoid the race // condition where the suspender reads the flag as FALSE, we set it and signal and the // suspender then waits. // Acquire the suspend mutex. Once we enter the critical section the suspender has // either gotten there before us (and is waiting for our signal) or is yet to even // check the flag (so we can set it here to stop them attempting a wait). status = pthread_mutex_lock(&m_mutexSusp); if (status != 0) { ASSERT("pthread_mutex_lock returned %d (%s)\n", status, strerror(status)); } m_fSuspended = TRUE; status = pthread_cond_signal(&m_condSusp); if (status != 0) { ASSERT("pthread_cond_signal returned %d (%s)\n", status, strerror(status)); } status = pthread_mutex_unlock(&m_mutexSusp); if (status != 0) { ASSERT("pthread_mutex_unlock returned %d (%s)\n", status, strerror(status)); } #endif // USE_POSIX_SEMAPHORES } /*++ Function: WaitOnSuspendSemaphore WaitOnSuspendSemaphore is a utility function for a thread to wait on its POSIX or SysV suspension semaphore. --*/ void CThreadSuspensionInfo::WaitOnSuspendSemaphore() { #if USE_POSIX_SEMAPHORES while (sem_wait(&m_semSusp) == -1) { ASSERT("sem_wait returned -1 and set errno to %d (%s)\n", errno, strerror(errno)); } #elif USE_SYSV_SEMAPHORES while (semop(m_nSemsuspid, &m_sbSemwait, 1) == -1) { ASSERT("semop wait returned -1 and set errno to %d (%s)\n", errno, strerror(errno)); } #elif USE_PTHREAD_CONDVARS int status; // By the time we wait the target thread may have already signalled its suspension (in // which case m_fSuspended will be TRUE and we shouldn't wait on the cond var). But we // must check the flag and potentially wait atomically to avoid the race where we read // the flag and the target thread sets it and signals before we have a chance to wait. status = pthread_mutex_lock(&m_mutexSusp); if (status != 0) { ASSERT("pthread_mutex_lock returned %d (%s)\n", status, strerror(status)); } // If the target has already acknowledged the suspend we shouldn't wait. while (!m_fSuspended) { // We got here before the target could signal. Wait on them (which atomically releases // the mutex during the wait). status = pthread_cond_wait(&m_condSusp, &m_mutexSusp); if (status != 0) { ASSERT("pthread_cond_wait returned %d (%s)\n", status, strerror(status)); } } status = pthread_mutex_unlock(&m_mutexSusp); if (status != 0) { ASSERT("pthread_mutex_unlock returned %d (%s)\n", status, strerror(status)); } #endif // USE_POSIX_SEMAPHORES } /*++ Function: PostOnResumeSemaphore PostOnResumeSemaphore is a utility function for a thread to post on its POSIX or SysV resume semaphore. --*/ void CThreadSuspensionInfo::PostOnResumeSemaphore() { #if USE_POSIX_SEMAPHORES if (sem_post(&m_semResume) == -1) { ASSERT("sem_post returned -1 and set errno to %d (%s)\n", errno, strerror(errno)); } #elif USE_SYSV_SEMAPHORES if (semop(m_nSemrespid, &m_sbSempost, 1) == -1) { ASSERT("semop - post returned -1 and set errno to %d (%s)\n", errno, strerror(errno)); } #elif USE_PTHREAD_CONDVARS int status; // The resuming thread may not have entered the wait yet, in which case the cond var // signal below will be a no-op. To prevent the race condition we set m_fResumed to // TRUE first (which the resumer will take as an indication that no wait is required). 
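//
// Illustrative sketch of the flag-plus-condition-variable handshake used above
// (standalone, hypothetical names; assumes <pthread.h> and that the members are
// initialized with pthread_mutex_init()/pthread_cond_init()). Because the flag
// is only touched under the mutex, "set flag then signal" on one side and
// "check flag, maybe wait" on the other appear atomic to each other, which is
// exactly what closes the race described in the comments above.
//
struct AckEvent
{
    pthread_mutex_t mutex;  // protects 'fired' and pairs with 'cond'
    pthread_cond_t  cond;   // signalled once the event has fired
    bool            fired;  // starts false; set exactly once
};

static void AckEventSignal(AckEvent* ev)
{
    pthread_mutex_lock(&ev->mutex);
    ev->fired = true;                 // set first so a late waiter skips the wait
    pthread_cond_signal(&ev->cond);   // wake a waiter that is already blocked
    pthread_mutex_unlock(&ev->mutex);
}

static void AckEventWait(AckEvent* ev)
{
    pthread_mutex_lock(&ev->mutex);
    while (!ev->fired)                // predicate loop guards against spurious wakeups
    {
        pthread_cond_wait(&ev->cond, &ev->mutex);
    }
    pthread_mutex_unlock(&ev->mutex);
}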
// But the setting of the flag and the signal must appear atomic to the resumer (as // reading the flag and potentially waiting must appear to us) to avoid the race // condition where the resumer reads the flag as FALSE, we set it and signal and the // resumer then waits. // Acquire the resume mutex. Once we enter the critical section the resumer has // either gotten there before us (and is waiting for our signal) or is yet to even // check the flag (so we can set it here to stop them attempting a wait). status = pthread_mutex_lock(&m_mutexResume); if (status != 0) { ASSERT("pthread_mutex_lock returned %d (%s)\n", status, strerror(status)); } m_fResumed = TRUE; status = pthread_cond_signal(&m_condResume); if (status != 0) { ASSERT("pthread_cond_signal returned %d (%s)\n", status, strerror(status)); } status = pthread_mutex_unlock(&m_mutexResume); if (status != 0) { ASSERT("pthread_mutex_unlock returned %d (%s)\n", status, strerror(status)); } #endif // USE_POSIX_SEMAPHORES } /*++ Function: WaitOnResumeSemaphore WaitOnResumeSemaphore is a utility function for a thread to wait on its POSIX or SysV resume semaphore. --*/ void CThreadSuspensionInfo::WaitOnResumeSemaphore() { #if USE_POSIX_SEMAPHORES while (sem_wait(&m_semResume) == -1) { ASSERT("sem_wait returned -1 and set errno to %d (%s)\n", errno, strerror(errno)); } #elif USE_SYSV_SEMAPHORES while (semop(m_nSemrespid, &m_sbSemwait, 1) == -1) { ASSERT("semop wait returned -1 and set errno to %d (%s)\n", errno, strerror(errno)); } #elif USE_PTHREAD_CONDVARS int status; // By the time we wait the target thread may have already signalled its resumption (in // which case m_fResumed will be TRUE and we shouldn't wait on the cond var). But we // must check the flag and potentially wait atomically to avoid the race where we read // the flag and the target thread sets it and signals before we have a chance to wait. status = pthread_mutex_lock(&m_mutexResume); if (status != 0) { ASSERT("pthread_mutex_lock returned %d (%s)\n", status, strerror(status)); } // If the target has already acknowledged the resume we shouldn't wait. while (!m_fResumed) { // We got here before the target could signal. Wait on them (which atomically releases // the mutex during the wait). status = pthread_cond_wait(&m_condResume, &m_mutexResume); if (status != 0) { ASSERT("pthread_cond_wait returned %d (%s)\n", status, strerror(status)); } } status = pthread_mutex_unlock(&m_mutexResume); if (status != 0) { ASSERT("pthread_mutex_unlock returned %d (%s)\n", status, strerror(status)); } #endif // USE_POSIX_SEMAPHORES } /*++ Function: InitializeSuspensionLock InitializeSuspensionLock initializes a thread's suspension spinlock or suspension mutex. It is called from the CThreadSuspensionInfo constructor. --*/ VOID CThreadSuspensionInfo::InitializeSuspensionLock() { #if DEADLOCK_WHEN_THREAD_IS_SUSPENDED_WHILE_BLOCKED_ON_MUTEX SPINLOCKInit(&m_nSpinlock); #else int iError = pthread_mutex_init(&m_ptmSuspmutex, NULL); if (0 != iError ) { ASSERT("pthread_mutex_init(&suspmutex) returned %d\n", iError); return; } m_fSuspmutexInitialized = TRUE; #endif // DEADLOCK_WHEN_THREAD_IS_SUSPENDED_WHILE_BLOCKED_ON_MUTEX } /*++ Function: InitializePreCreate InitializePreCreate initializes the semaphores and signal masks used for thread suspension. At the end, it sets the calling thread's signal mask to the default signal mask. 
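//
// Illustrative sketch (not part of the original file): the wait loops above
// retry sem_wait()/semop() whenever they return -1, which in practice means
// retrying calls interrupted by a signal. A version that makes the EINTR case
// explicit, assuming <semaphore.h>, <errno.h> and <string.h>:
//
static void WaitOnPosixSemaphoreSketch(sem_t* sem)
{
    while (sem_wait(sem) == -1)
    {
        if (errno == EINTR)
        {
            continue;   // interrupted by a signal: simply retry the wait
        }
        ASSERT("sem_wait failed and set errno to %d (%s)\n", errno, strerror(errno));
        break;          // any other failure is unexpected; do not spin forever
    }
}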
--*/ PAL_ERROR CThreadSuspensionInfo::InitializePreCreate() { PAL_ERROR palError = ERROR_INTERNAL_ERROR; int iError = 0; #if SEM_INIT_MODIFIES_ERRNO int nStoredErrno; #endif // SEM_INIT_MODIFIES_ERRNO #if USE_POSIX_SEMAPHORES #if SEM_INIT_MODIFIES_ERRNO nStoredErrno = errno; #endif // SEM_INIT_MODIFIES_ERRNO // initialize suspension semaphore iError = sem_init(&m_semSusp, 0, 0); #if SEM_INIT_MODIFIES_ERRNO if (iError == 0) { // Restore errno if sem_init succeeded. errno = nStoredErrno; } #endif // SEM_INIT_MODIFIES_ERRNO if (0 != iError ) { ASSERT("sem_init(&suspsem) returned %d\n", iError); goto InitializePreCreateExit; } #if SEM_INIT_MODIFIES_ERRNO nStoredErrno = errno; #endif // SEM_INIT_MODIFIES_ERRNO // initialize resume semaphore iError = sem_init(&m_semResume, 0, 0); #if SEM_INIT_MODIFIES_ERRNO if (iError == 0) { // Restore errno if sem_init succeeded. errno = nStoredErrno; } #endif // SEM_INIT_MODIFIES_ERRNO if (0 != iError ) { ASSERT("sem_init(&suspsem) returned %d\n", iError); sem_destroy(&m_semSusp); goto InitializePreCreateExit; } m_fSemaphoresInitialized = TRUE; #elif USE_SYSV_SEMAPHORES // preparing to initialize the SysV semaphores. union semun semunData; m_nSemsuspid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0666); if (m_nSemsuspid == -1) { ASSERT("semget for suspension sem id returned -1 and set errno to %d (%s)\n", errno, strerror(errno)); goto InitializePreCreateExit; } m_nSemrespid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0666); if (m_nSemrespid == -1) { ASSERT("semget for resumption sem id returned -1 and set errno to %d (%s)\n", errno, strerror(errno)); goto InitializePreCreateExit; } if (m_nSemsuspid == m_nSemrespid) { ASSERT("Suspension and Resumption Semaphores have the same id\n"); goto InitializePreCreateExit; } semunData.val = 0; iError = semctl(m_nSemsuspid, 0, SETVAL, semunData); if (iError == -1) { ASSERT("semctl for suspension sem id returned -1 and set errno to %d (%s)\n", errno, strerror(errno)); goto InitializePreCreateExit; } semunData.val = 0; iError = semctl(m_nSemrespid, 0, SETVAL, semunData); if (iError == -1) { ASSERT("semctl for resumption sem id returned -1 and set errno to %d (%s)\n", errno, strerror(errno)); goto InitializePreCreateExit; } // initialize suspend semaphore m_sbSemwait.sem_num = 0; m_sbSemwait.sem_op = -1; m_sbSemwait.sem_flg = 0; // initialize resume semaphore m_sbSempost.sem_num = 0; m_sbSempost.sem_op = 1; m_sbSempost.sem_flg = 0; #elif USE_PTHREAD_CONDVARS iError = pthread_cond_init(&m_condSusp, NULL); if (iError != 0) { ASSERT("pthread_cond_init for suspension returned %d (%s)\n", iError, strerror(iError)); goto InitializePreCreateExit; } iError = pthread_mutex_init(&m_mutexSusp, NULL); if (iError != 0) { ASSERT("pthread_mutex_init for suspension returned %d (%s)\n", iError, strerror(iError)); goto InitializePreCreateExit; } iError = pthread_cond_init(&m_condResume, NULL); if (iError != 0) { ASSERT("pthread_cond_init for resume returned %d (%s)\n", iError, strerror(iError)); goto InitializePreCreateExit; } iError = pthread_mutex_init(&m_mutexResume, NULL); if (iError != 0) { ASSERT("pthread_mutex_init for resume returned %d (%s)\n", iError, strerror(iError)); goto InitializePreCreateExit; } m_fSemaphoresInitialized = TRUE; #endif // USE_POSIX_SEMAPHORES // Initialization was successful. 
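//
// Illustrative helper (hypothetical, not in the PAL): on platforms where
// sem_init() clobbers errno even on success (SEM_INIT_MODIFIES_ERRNO), the code
// above saves errno before the call and restores it afterwards. The same idea
// as a small wrapper, assuming <semaphore.h> and <errno.h>:
//
static int SemInitPreservingErrno(sem_t* sem, unsigned int initialValue)
{
    int savedErrno = errno;       // remember the caller-visible errno
    int result = sem_init(sem, 0 /* not shared between processes */, initialValue);
    if (result == 0)
    {
        errno = savedErrno;       // success: undo any spurious change to errno
    }
    return result;                // on failure, errno describes the error as usual
}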
palError = NO_ERROR; InitializePreCreateExit: if (NO_ERROR == palError && 0 != iError) { switch (iError) { case ENOMEM: case EAGAIN: { palError = ERROR_OUTOFMEMORY; break; } default: { ASSERT("A pthrSuspender init call returned %d (%s)\n", iError, strerror(iError)); palError = ERROR_INTERNAL_ERROR; } } } return palError; } CThreadSuspensionInfo::~CThreadSuspensionInfo() { #if !DEADLOCK_WHEN_THREAD_IS_SUSPENDED_WHILE_BLOCKED_ON_MUTEX if (m_fSuspmutexInitialized) { INDEBUG(int iError = ) pthread_mutex_destroy(&m_ptmSuspmutex); _ASSERT_MSG(0 == iError, "pthread_mutex_destroy returned %d (%s)\n", iError, strerror(iError)); } #endif #if USE_POSIX_SEMAPHORES if (m_fSemaphoresInitialized) { int iError; iError = sem_destroy(&m_semSusp); _ASSERT_MSG(0 == iError, "sem_destroy failed and set errno to %d (%s)\n", errno, strerror(errno)); iError = sem_destroy(&m_semResume); _ASSERT_MSG(0 == iError, "sem_destroy failed and set errno to %d (%s)\n", errno, strerror(errno)); } #elif USE_SYSV_SEMAPHORES DestroySemaphoreIds(); #elif USE_PTHREAD_CONDVARS if (m_fSemaphoresInitialized) { int iError; iError = pthread_cond_destroy(&m_condSusp); _ASSERT_MSG(0 == iError, "pthread_cond_destroy failed with %d (%s)\n", iError, strerror(iError)); iError = pthread_mutex_destroy(&m_mutexSusp); _ASSERT_MSG(0 == iError, "pthread_mutex_destroy failed with %d (%s)\n", iError, strerror(iError)); iError = pthread_cond_destroy(&m_condResume); _ASSERT_MSG(0 == iError, "pthread_cond_destroy failed with %d (%s)\n", iError, strerror(iError)); iError = pthread_mutex_destroy(&m_mutexResume); _ASSERT_MSG(0 == iError, "pthread_mutex_destroy failed with %d (%s)\n", iError, strerror(iError)); } #endif // USE_POSIX_SEMAPHORES } #if USE_SYSV_SEMAPHORES /*++ Function: DestroySemaphoreIds DestroySemaphoreIds is called from the CThreadSuspensionInfo destructor and from PROCCleanupThreadSemIds. If a thread exits before shutdown or is suspended during shutdown, its destructor will be invoked and the semaphore ids destroyed. In assert or exceptions situations that are suspension unsafe, PROCCleanupThreadSemIds is called, which uses DestroySemaphoreIds. --*/ void CThreadSuspensionInfo::DestroySemaphoreIds() { union semun semunData; if (m_nSemsuspid != 0) { semunData.val = 0; if (0 != semctl(m_nSemsuspid, 0, IPC_RMID, semunData)) { ERROR("semctl(Semsuspid) failed and set errno to %d (%s)\n", errno, strerror(errno)); } else { m_nSemsuspid = 0; } } if (this->m_nSemrespid) { semunData.val = 0; if (0 != semctl(m_nSemrespid, 0, IPC_RMID, semunData)) { ERROR("semctl(Semrespid) failed and set errno to %d (%s)\n", errno, strerror(errno)); } else { m_nSemrespid = 0; } } } #endif // USE_SYSV_SEMAPHORES
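//
// Illustrative sketch (hypothetical helper, not part of the file above): the
// SysV cleanup pattern used by DestroySemaphoreIds -- remove the semaphore set
// with IPC_RMID only if it was actually created, and zero the id afterwards so
// cleanup stays idempotent. Assumes <sys/sem.h> and the same 'union semun'
// definition the PAL already uses.
//
static void RemoveSysVSemaphoreIfCreated(int* semId)
{
    if (*semId == 0)
    {
        return;                  // never created (or already removed): nothing to do
    }
    union semun unused;
    unused.val = 0;
    if (semctl(*semId, 0, IPC_RMID, unused) != 0)
    {
        ERROR("semctl(IPC_RMID) failed and set errno to %d (%s)\n", errno, strerror(errno));
        return;                  // keep the id so a later cleanup attempt can retry
    }
    *semId = 0;                  // mark as removed
}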
palError = NO_ERROR; InitializePreCreateExit: if (NO_ERROR == palError && 0 != iError) { switch (iError) { case ENOMEM: case EAGAIN: { palError = ERROR_OUTOFMEMORY; break; } default: { ASSERT("A pthrSuspender init call returned %d (%s)\n", iError, strerror(iError)); palError = ERROR_INTERNAL_ERROR; } } } return palError; } CThreadSuspensionInfo::~CThreadSuspensionInfo() { #if !DEADLOCK_WHEN_THREAD_IS_SUSPENDED_WHILE_BLOCKED_ON_MUTEX if (m_fSuspmutexInitialized) { INDEBUG(int iError = ) pthread_mutex_destroy(&m_ptmSuspmutex); _ASSERT_MSG(0 == iError, "pthread_mutex_destroy returned %d (%s)\n", iError, strerror(iError)); } #endif #if USE_POSIX_SEMAPHORES if (m_fSemaphoresInitialized) { int iError; iError = sem_destroy(&m_semSusp); _ASSERT_MSG(0 == iError, "sem_destroy failed and set errno to %d (%s)\n", errno, strerror(errno)); iError = sem_destroy(&m_semResume); _ASSERT_MSG(0 == iError, "sem_destroy failed and set errno to %d (%s)\n", errno, strerror(errno)); } #elif USE_SYSV_SEMAPHORES DestroySemaphoreIds(); #elif USE_PTHREAD_CONDVARS if (m_fSemaphoresInitialized) { int iError; iError = pthread_cond_destroy(&m_condSusp); _ASSERT_MSG(0 == iError, "pthread_cond_destroy failed with %d (%s)\n", iError, strerror(iError)); iError = pthread_mutex_destroy(&m_mutexSusp); _ASSERT_MSG(0 == iError, "pthread_mutex_destroy failed with %d (%s)\n", iError, strerror(iError)); iError = pthread_cond_destroy(&m_condResume); _ASSERT_MSG(0 == iError, "pthread_cond_destroy failed with %d (%s)\n", iError, strerror(iError)); iError = pthread_mutex_destroy(&m_mutexResume); _ASSERT_MSG(0 == iError, "pthread_mutex_destroy failed with %d (%s)\n", iError, strerror(iError)); } #endif // USE_POSIX_SEMAPHORES } #if USE_SYSV_SEMAPHORES /*++ Function: DestroySemaphoreIds DestroySemaphoreIds is called from the CThreadSuspensionInfo destructor and from PROCCleanupThreadSemIds. If a thread exits before shutdown or is suspended during shutdown, its destructor will be invoked and the semaphore ids destroyed. In assert or exceptions situations that are suspension unsafe, PROCCleanupThreadSemIds is called, which uses DestroySemaphoreIds. --*/ void CThreadSuspensionInfo::DestroySemaphoreIds() { union semun semunData; if (m_nSemsuspid != 0) { semunData.val = 0; if (0 != semctl(m_nSemsuspid, 0, IPC_RMID, semunData)) { ERROR("semctl(Semsuspid) failed and set errno to %d (%s)\n", errno, strerror(errno)); } else { m_nSemsuspid = 0; } } if (this->m_nSemrespid) { semunData.val = 0; if (0 != semctl(m_nSemrespid, 0, IPC_RMID, semunData)) { ERROR("semctl(Semrespid) failed and set errno to %d (%s)\n", errno, strerror(errno)); } else { m_nSemrespid = 0; } } } #endif // USE_SYSV_SEMAPHORES
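// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the PAL sources): the USE_PTHREAD_CONDVARS
// paths above (PostOnSuspendSemaphore / WaitOnSuspendSemaphore and their
// resume counterparts) reduce to the classic "flag + mutex + condition
// variable" handshake shown below. All names here (SuspendHandshake, Post,
// Wait) are hypothetical and exist only to make the race-avoidance argument
// concrete: the flag update and the signal happen in the same critical
// section, so the waiter can never read the flag as FALSE and then miss the
// signal.
#include <pthread.h>

struct SuspendHandshake
{
    pthread_mutex_t m_mutex;
    pthread_cond_t  m_cond;
    bool            m_fSignaled;

    SuspendHandshake() : m_fSignaled(false)
    {
        pthread_mutex_init(&m_mutex, NULL);
        pthread_cond_init(&m_cond, NULL);
    }

    ~SuspendHandshake()
    {
        pthread_cond_destroy(&m_cond);
        pthread_mutex_destroy(&m_mutex);
    }

    // Called by the signaling side (e.g. the target thread acknowledging a
    // suspend). Setting the flag before signaling means a waiter that has not
    // entered the wait yet will see the flag and skip the wait entirely.
    void Post()
    {
        pthread_mutex_lock(&m_mutex);
        m_fSignaled = true;
        pthread_cond_signal(&m_cond);
        pthread_mutex_unlock(&m_mutex);
    }

    // Called by the waiting side (e.g. the suspending thread). The flag check
    // and the wait are atomic with respect to Post() because both run under
    // the same mutex; the loop also guards against spurious wakeups.
    void Wait()
    {
        pthread_mutex_lock(&m_mutex);
        while (!m_fSignaled)
        {
            pthread_cond_wait(&m_cond, &m_mutex);
        }
        pthread_mutex_unlock(&m_mutex);
    }
};
// ---------------------------------------------------------------------------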
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/tests/JIT/jit64/valuetypes/nullable/castclass/castclass/castclass013.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="castclass013.cs" /> <Compile Include="..\structdef.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="castclass013.cs" /> <Compile Include="..\structdef.cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/tests/JIT/IL_Conformance/Old/Conformance_Base/ldc_ret_i4.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern legacy library mscorlib {} .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } // //====================================== //---- CLASS ---------------- .class public _ret { //---- GLOBAL DATA ---------- //---- METHODS -------------- .method public static int32 ret_test_0(int32) { .maxstack 1 ldc.i4 0xAAAAAAAA ret } .method public static int32 ret_test_1(int32) { .maxstack 1 ldc.i4 0x55555555 ret } .method public static int32 ret_test_2(int32) { .maxstack 1 ldc.i4 0x7FFFFFFF ret } .method public static int32 ret_test_3(int32) { .maxstack 1 ldc.i4 0x00000001 ret } .method public static int32 ret_test_4(int32) { .maxstack 1 ldc.i4 0x00000000 ret } .method public static int32 ret_test_5(int32) { .maxstack 1 ldc.i4 0xFFFFFFFF ret } .method public static int32 ret_test_6(int32) { .maxstack 1 ldc.i4 0x80000000 ret } //---- CONSTRUCTOR ---------- .method public void _ret() { .maxstack 0 ret } //---- MAIN ----------------- .method public static int32 main(class [mscorlib]System.String[]) { .entrypoint .maxstack 5 //====== begin testing ====== // -- Even ldc.i4 0xAAAAAAAA call int32 _ret::ret_test_0(int32) // -- Even ldc.i4 0xAAAAAAAA ceq brfalse FAIL // -- Odd ldc.i4 0x55555555 call int32 _ret::ret_test_1(int32) // -- Odd ldc.i4 0x55555555 ceq brfalse FAIL // -- Max ldc.i4 0x7FFFFFFF call int32 _ret::ret_test_2(int32) // -- Max ldc.i4 0x7FFFFFFF ceq brfalse FAIL // -- 1 ldc.i4 0x00000001 call int32 _ret::ret_test_3(int32) // -- 1 ldc.i4 0x00000001 ceq brfalse FAIL // -- 0 ldc.i4 0x00000000 call int32 _ret::ret_test_4(int32) // -- 0 ldc.i4 0x00000000 ceq brfalse FAIL // -- -1 ldc.i4 0xFFFFFFFF call int32 _ret::ret_test_5(int32) // -- -1 ldc.i4 0xFFFFFFFF ceq brfalse FAIL // -- Min ldc.i4 0x80000000 call int32 _ret::ret_test_6(int32) // -- Min ldc.i4 0x80000000 ceq brfalse FAIL //====== end testing ======== //---- branch here on pass -- PASS: ldc.i4 100 br END //---- branch here on fail -- FAIL: ldc.i4 101 //---- return the result ---- END: ret //---- END OF METHOD -------- } //---- EOF ------------------ } .assembly ldc_ret_i4{}
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/libraries/System.Runtime.Caching/tests/AdditionalCacheTests/AdditionalCacheTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using Xunit; using System; using System.Runtime.Caching; namespace System.Runtime.Caching.Tests { // These are the tests to fill in some of the coverage in ported Mono caching tests public class AdditionalCacheTests { [Fact] public void DisposedCacheTest() { var mc = new MemoryCache("my disposed cache 1"); mc.Add("aa", "bb", new CacheItemPolicy()); mc.Dispose(); Assert.Null(mc["aa"]); mc = new MemoryCache("my disposed cache 2"); CacheEntryRemovedReason reason = (CacheEntryRemovedReason)1111; var cip = new CacheItemPolicy(); cip.RemovedCallback = (CacheEntryRemovedArguments args) => { reason = args.RemovedReason; }; mc.Set("key", "value", cip); mc.Dispose(); Assert.Equal(CacheEntryRemovedReason.CacheSpecificEviction, reason); } } }
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/libraries/Common/tests/System/Xml/ModuleCore/cltmconsole.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.IO; using System.Text; using System.Diagnostics; namespace OLEDB.Test.ModuleCore { //////////////////////////////////////////////////////////////// // CLTMConsole // //////////////////////////////////////////////////////////////// public class CLTMConsole : TextWriter { //Data //Constructor public CLTMConsole() { } //Overloads - A subclass must minimally implement the Write(Char) method. public override void Write(char ch) { CError.Write(ch.ToString()); } //Overloads - We also implement "string" since its much more efficient and TextWriter will call this instead public override void Write(string strText) { CError.Write(strText); } //Overloads - We also implement "string" since its much more efficient and TextWriter will call this instead public override void Write(char[] ch) { //Note: This is a workaround the TextWriter::Write(char[]) that incorrectly //writes 1 char at a time, which means \r\n is written separately and then gets fixed //up to be two carriage returns! if (ch != null) { Write(new string(ch)); } } public override void WriteLine(string strText) { Write(strText + this.NewLine); } //Overloads //Writes a line terminator to the text stream. //The default line terminator is a carriage return followed by a line feed ("\r\n"), //but this value can be changed using the NewLine property. public override void WriteLine() { Write(this.NewLine); } //Overloads public override Encoding Encoding { get { return Encoding.Unicode; } } } //////////////////////////////////////////////////////////////// // CLTMTraceListener // //////////////////////////////////////////////////////////////// public class CLTMTraceListener //: TraceListener { //Data //Constructor public CLTMTraceListener() { } } }
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/tests/baseservices/exceptions/generics/TypeParameter016.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <AllowUnsafeBlocks>true</AllowUnsafeBlocks> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="typeparameter016.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <AllowUnsafeBlocks>true</AllowUnsafeBlocks> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="typeparameter016.cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/coreclr/vm/gccover.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************/ /* gccover.cpp */ /****************************************************************************/ /* This file holds code that is designed to test GC pointer tracking in fully interruptible code. We basically do a GC everywhere we can in jitted code */ /****************************************************************************/ #include "common.h" #ifdef HAVE_GCCOVER #pragma warning(disable:4663) #include "eeconfig.h" #include "gms.h" #include "utsem.h" #include "gccover.h" #include "virtualcallstub.h" #include "threadsuspend.h" #if defined(TARGET_AMD64) || defined(TARGET_ARM) #include "gcinfodecoder.h" #endif #include "disassembler.h" /****************************************************************************/ MethodDesc* AsMethodDesc(size_t addr); static PBYTE getTargetOfCall(PBYTE instrPtr, PCONTEXT regs, PBYTE*nextInstr); #if defined(TARGET_ARM) || defined(TARGET_ARM64) static void replaceSafePointInstructionWithGcStressInstr(UINT32 safePointOffset, LPVOID codeStart); static bool replaceInterruptibleRangesWithGcStressInstr (UINT32 startOffset, UINT32 stopOffset, LPVOID codeStart); #endif // There is a call target instruction, try to find the MethodDesc for where target points to. // Returns nullptr if it can't find it. static MethodDesc* getTargetMethodDesc(PCODE target) { MethodDesc* targetMD = ExecutionManager::GetCodeMethodDesc(target); if (targetMD != nullptr) { // It is JIT/NGened call. return targetMD; } VirtualCallStubManager::StubKind vsdStubKind = VirtualCallStubManager::SK_UNKNOWN; VirtualCallStubManager *pVSDStubManager = VirtualCallStubManager::FindStubManager(target, &vsdStubKind); if (vsdStubKind != VirtualCallStubManager::SK_BREAKPOINT && vsdStubKind != VirtualCallStubManager::SK_UNKNOWN) { // It is a VSD stub manager. DispatchToken token(VirtualCallStubManager::GetTokenFromStubQuick(pVSDStubManager, target, vsdStubKind)); _ASSERTE(token.IsValid()); return VirtualCallStubManager::GetInterfaceMethodDescFromToken(token); } if (RangeSectionStubManager::GetStubKind(target) == STUB_CODE_BLOCK_PRECODE) { // The address looks like a value stub, try to get the method descriptor. 
return MethodDesc::GetMethodDescFromStubAddr(target, TRUE); } return nullptr; } bool IsGcCoverageInterruptInstruction(PBYTE instrPtr) { UINT32 instrVal; #if defined(TARGET_ARM64) instrVal = *reinterpret_cast<UINT32*>(instrPtr); #elif defined(TARGET_ARM) size_t instrLen = GetARMInstructionLength(instrPtr); if (instrLen == 2) { instrVal = *reinterpret_cast<UINT16*>(instrPtr); } else { instrVal = *reinterpret_cast<UINT32*>(instrPtr); } #else // x64 and x86 instrVal = *instrPtr; #endif return IsGcCoverageInterruptInstructionVal(instrVal); } bool IsOriginalInstruction(PBYTE instrPtr, GCCoverageInfo* gcCover, DWORD offset) { #if defined(TARGET_ARM64) UINT32 instrVal = *reinterpret_cast<UINT32*>(instrPtr); UINT32 origInstrVal = *reinterpret_cast<UINT32*>(gcCover->savedCode + offset); return (instrVal == origInstrVal); #elif defined(TARGET_ARM) size_t instrLen = GetARMInstructionLength(instrPtr); if (instrLen == 2) { UINT16 instrVal = *reinterpret_cast<UINT16*>(instrPtr); UINT16 origInstrVal = *reinterpret_cast<UINT16*>(gcCover->savedCode + offset); return (instrVal == origInstrVal); } else { _ASSERTE(instrLen == 4); UINT32 instrVal = *reinterpret_cast<UINT32*>(instrPtr); UINT32 origInstrVal = *reinterpret_cast<UINT32*>(gcCover->savedCode + offset); return (instrVal == origInstrVal); } #else // x64 and x86 UINT8 instrVal = *reinterpret_cast<UINT8*>(instrPtr); UINT8 origInstrVal = gcCover->savedCode[offset]; return (instrVal == origInstrVal); #endif } void SetupAndSprinkleBreakpoints( NativeCodeVersion nativeCodeVersion, EECodeInfo * pCodeInfo, IJitManager::MethodRegionInfo methodRegionInfo, BOOL fZapped ) { _ASSERTE(!nativeCodeVersion.IsNull()); // Allocate room for the GCCoverageInfo and copy of the method instructions MethodDesc *pMD = nativeCodeVersion.GetMethodDesc(); size_t memSize = sizeof(GCCoverageInfo) + methodRegionInfo.hotSize + methodRegionInfo.coldSize; GCCoverageInfo* gcCover = (GCCoverageInfo*)(void*) pMD->GetLoaderAllocator()->GetHighFrequencyHeap()->AllocAlignedMem(memSize, CODE_SIZE_ALIGN); memset(gcCover, 0, sizeof(GCCoverageInfo)); gcCover->methodRegion = methodRegionInfo; gcCover->codeMan = pCodeInfo->GetCodeManager(); gcCover->gcInfoToken = pCodeInfo->GetGCInfoToken(); gcCover->callerThread = 0; gcCover->doingEpilogChecks = true; gcCover->SprinkleBreakpoints(gcCover->savedCode, gcCover->methodRegion.hotStartAddress, gcCover->methodRegion.hotSize, 0, fZapped); // This is not required for ARM* as the above call does the work for both hot & cold regions #if !defined(TARGET_ARM) && !defined(TARGET_ARM64) if (gcCover->methodRegion.coldSize != 0) { gcCover->SprinkleBreakpoints(gcCover->savedCode + gcCover->methodRegion.hotSize, gcCover->methodRegion.coldStartAddress, gcCover->methodRegion.coldSize, gcCover->methodRegion.hotSize, fZapped); } #endif nativeCodeVersion.SetGCCoverageInfo(gcCover); } void SetupAndSprinkleBreakpointsForJittedMethod(NativeCodeVersion nativeCodeVersion, PCODE codeStart ) { _ASSERTE(!nativeCodeVersion.IsNull()); EECodeInfo codeInfo(codeStart); _ASSERTE(codeInfo.IsValid()); _ASSERTE(codeInfo.GetRelOffset() == 0); IJitManager::MethodRegionInfo methodRegionInfo; codeInfo.GetMethodRegionInfo(&methodRegionInfo); _ASSERTE(PCODEToPINSTR(codeStart) == methodRegionInfo.hotStartAddress); #ifdef _DEBUG if (!g_pConfig->SkipGCCoverage(nativeCodeVersion.GetMethodDesc()->GetModule()->GetSimpleName())) #endif SetupAndSprinkleBreakpoints(nativeCodeVersion, &codeInfo, methodRegionInfo, FALSE ); } /****************************************************************************/ 
/* called when a method is first jitted when GCStress level 4 or 8 is on */ void SetupGcCoverage(NativeCodeVersion nativeCodeVersion, BYTE* methodStartPtr) { _ASSERTE(!nativeCodeVersion.IsNull()); #ifdef _DEBUG if (!g_pConfig->ShouldGcCoverageOnMethod(nativeCodeVersion.GetMethodDesc()->m_pszDebugMethodName)) { return; } #endif // Ideally we would assert here that m_GcCover is NULL. // // However, we can't do that (at least not yet), because we may // invoke this method more than once on a given // MethodDesc. Examples include prejitted methods and rejitted // methods. // // In the prejit case, we can't safely re-instrument an already // instrumented method. By bailing out here, we will use the // original instrumentation, which should still be valid as // the method code has not changed. // // In the rejit case, the old method code may still be active and // instrumented, so we need to preserve that gc cover info. By // bailing out here we will skip instrumenting the rejitted native // code, and since the rejitted method does not get instrumented // we should be able to tolerate that the gc cover info does not // match. if (nativeCodeVersion.GetGCCoverageInfo() != NULL) { return; } PCODE codeStart = (PCODE) methodStartPtr; SetupAndSprinkleBreakpointsForJittedMethod(nativeCodeVersion, codeStart); } void ReplaceInstrAfterCall(PBYTE instrToReplace, MethodDesc* callMD) { ReturnKind returnKind = callMD->GetReturnKind(true); if (!IsValidReturnKind(returnKind)) { #if defined(TARGET_AMD64) && defined(TARGET_UNIX) _ASSERTE(!"Unexpected return kind for x64 Unix."); #else // SKip GC coverage after the call. return; #endif } _ASSERTE(IsValidReturnKind(returnKind)); bool ispointerKind = IsPointerReturnKind(returnKind); #ifdef TARGET_ARM size_t instrLen = GetARMInstructionLength(instrToReplace); bool protectReturn = ispointerKind; if (protectReturn) if (instrLen == 2) *(WORD*)instrToReplace = INTERRUPT_INSTR_PROTECT_RET; else *(DWORD*)instrToReplace = INTERRUPT_INSTR_PROTECT_RET_32; else if (instrLen == 2) *(WORD*)instrToReplace = INTERRUPT_INSTR; else *(DWORD*)instrToReplace = INTERRUPT_INSTR_32; #elif defined(TARGET_ARM64) bool protectReturn = ispointerKind; if (protectReturn) *(DWORD*)instrToReplace = INTERRUPT_INSTR_PROTECT_RET; else *(DWORD*)instrToReplace = INTERRUPT_INSTR; #elif defined(TARGET_AMD64) || defined(TARGET_X86) if (ispointerKind) { bool protectRegister[2] = { false, false }; bool moreRegisters = false; ReturnKind fieldKind1 = ExtractRegReturnKind(returnKind, 0, moreRegisters); if (IsPointerFieldReturnKind(fieldKind1)) { protectRegister[0] = true; } if (moreRegisters) { ReturnKind fieldKind2 = ExtractRegReturnKind(returnKind, 1, moreRegisters); if (IsPointerFieldReturnKind(fieldKind2)) { protectRegister[1] = true; } } _ASSERTE(!moreRegisters); if (protectRegister[0] && !protectRegister[1]) { *instrToReplace = INTERRUPT_INSTR_PROTECT_FIRST_RET; } else { #if !defined(TARGET_AMD64) || !defined(TARGET_UNIX) _ASSERTE(!"Not expected multi reg return with pointers."); #endif // !TARGET_AMD64 || !TARGET_UNIX if (!protectRegister[0] && protectRegister[1]) { *instrToReplace = INTERRUPT_INSTR_PROTECT_SECOND_RET; } else { _ASSERTE(protectRegister[0] && protectRegister[1]); *instrToReplace = INTERRUPT_INSTR_PROTECT_BOTH_RET; } } } else { *instrToReplace = INTERRUPT_INSTR; } #else _ASSERTE(!"not implemented for platform"); #endif } #ifdef TARGET_AMD64 class GCCoverageRangeEnumerator { private: ICodeManager *m_pCodeManager; GCInfoToken m_pvGCTable; BYTE *m_codeStart; BYTE *m_codeEnd; BYTE 
*m_curFuncletEnd; BYTE *m_nextFunclet; BYTE* GetNextFunclet () { if (m_nextFunclet == NULL) return m_codeEnd; BYTE *pCurFunclet = (BYTE*)EECodeInfo::findNextFunclet(m_nextFunclet, m_codeEnd - m_nextFunclet, (LPVOID*)&m_curFuncletEnd); m_nextFunclet = (pCurFunclet != NULL) ? m_curFuncletEnd : NULL; if (pCurFunclet == NULL) return m_codeEnd; LOG((LF_JIT, LL_INFO1000, "funclet range %p-%p\n", pCurFunclet, m_curFuncletEnd)); // // workaround - adjust the funclet end address to exclude uninterruptible // code at the end of each funclet. The jit currently puts data like // jump tables in the code portion of the allocation, instead of the // read-only portion. // // TODO: If the entire range is uninterruptible, we should skip the // entire funclet. // unsigned ofsLastInterruptible = m_pCodeManager->FindEndOfLastInterruptibleRegion( static_cast<unsigned int>(pCurFunclet - m_codeStart), static_cast<unsigned int>(m_curFuncletEnd - m_codeStart), m_pvGCTable); if (ofsLastInterruptible) { m_curFuncletEnd = m_codeStart + ofsLastInterruptible; LOG((LF_JIT, LL_INFO1000, "adjusted end to %p\n", m_curFuncletEnd)); } return pCurFunclet; } public: GCCoverageRangeEnumerator (ICodeManager *pCodeManager, GCInfoToken pvGCTable, BYTE *codeStart, SIZE_T codeSize) { m_pCodeManager = pCodeManager; m_pvGCTable = pvGCTable; m_codeStart = codeStart; m_codeEnd = codeStart + codeSize; m_nextFunclet = codeStart; GetNextFunclet(); } // Checks that the given pointer is inside of a range where gc should be // tested. If not, increments the pointer until it is, and returns the // new pointer. BYTE *EnsureInRange (BYTE *cur) { if (cur >= m_curFuncletEnd) { cur = GetNextFunclet(); } return cur; } BYTE *SkipToNextRange () { return GetNextFunclet(); } }; #endif // TARGET_AMD64 /****************************************************************************/ /* sprinkle interrupt instructions that will stop on every GCSafe location regionOffsetAdj - Represents the offset of the current region from the beginning of the method (is 0 for hot region) */ void GCCoverageInfo::SprinkleBreakpoints( BYTE * saveAddr, PCODE pCode, size_t codeSize, size_t regionOffsetAdj, BOOL fZapped) { #if (defined(TARGET_X86) || defined(TARGET_AMD64)) && USE_DISASSEMBLER BYTE * codeStart = (BYTE *)pCode; ExecutableWriterHolder<BYTE> codeWriterHolder; size_t writeableOffset; memcpy(saveAddr, codeStart, codeSize); // For prejitted code we have to remove the write-protect on the code page if (fZapped) { DWORD oldProtect; ClrVirtualProtect(codeStart, codeSize, PAGE_EXECUTE_READWRITE, &oldProtect); writeableOffset = 0; } else { codeWriterHolder = ExecutableWriterHolder<BYTE>(codeStart, codeSize); writeableOffset = codeWriterHolder.GetRW() - codeStart; } PBYTE cur; BYTE* codeEnd = codeStart + codeSize; EECodeInfo codeInfo((PCODE)codeStart); static ConfigDWORD fGcStressOnDirectCalls; // ConfigDWORD must be a static variable #ifdef TARGET_AMD64 GCCoverageRangeEnumerator rangeEnum(codeMan, gcInfoToken, codeStart, codeSize); GcInfoDecoder safePointDecoder(gcInfoToken, (GcInfoDecoderFlags)0, 0); bool fSawPossibleSwitch = false; #endif cur = codeStart; Disassembler disassembler; // When we find a direct call instruction and we are partially-interruptible // we determine the target and place a breakpoint after the call // to simulate the hijack // However, we need to wait until we disassemble the instruction // after the call in order to put the breakpoint or we'll mess up // the disassembly // This variable is non-null if the previous instruction was a direct call, // and 
we have found it's target MethodDesc MethodDesc* prevDirectCallTargetMD = NULL; /* TODO. Simulating the hijack could cause problems in cases where the return register is not always a valid GC ref on the return offset. That could happen if we got to the return offset via a branch and not via return from the preceding call. However, this has not been an issue so far. Example: mov eax, someval test eax, eax jCC AFTERCALL call MethodWhichReturnsGCobject // return value is not used AFTERCALL: */ while (cur < codeEnd) { _ASSERTE(*cur != INTERRUPT_INSTR && *cur != INTERRUPT_INSTR_CALL); MethodDesc* targetMD = NULL; InstructionType instructionType; size_t len = disassembler.DisassembleInstruction(cur, codeEnd - cur, &instructionType); #ifdef TARGET_AMD64 // REVISIT_TODO apparently the jit does not use the entire RUNTIME_FUNCTION range // for code. It uses some for switch tables. Because the first few offsets // may be decodable as instructions, we can't reason about where we should // encounter invalid instructions. However, we do not want to silently skip // large chunks of methods just because the JIT started emitting a new // instruction, so only assume it is a switch table if we've seen the switch // code (an indirect unconditional jump) if ((len == 0) && fSawPossibleSwitch) { LOG((LF_JIT, LL_WARNING, "invalid instruction at %p (possibly start of switch table)\n", cur)); cur = rangeEnum.SkipToNextRange(); prevDirectCallTargetMD = NULL; fSawPossibleSwitch = false; continue; } #endif _ASSERTE(len > 0); _ASSERTE(len <= (size_t)(codeEnd-cur)); switch(instructionType) { case InstructionType::Call_IndirectUnconditional: #ifdef TARGET_AMD64 if(safePointDecoder.IsSafePoint((UINT32)(cur + len - codeStart + regionOffsetAdj))) #endif { *(cur + writeableOffset) = INTERRUPT_INSTR_CALL; // return value. May need to protect } break; case InstructionType::Call_DirectUnconditional: if(fGcStressOnDirectCalls.val(CLRConfig::INTERNAL_GcStressOnDirectCalls)) { #ifdef TARGET_AMD64 if(safePointDecoder.IsSafePoint((UINT32)(cur + len - codeStart + regionOffsetAdj))) #endif { PBYTE nextInstr; PBYTE target = getTargetOfCall(cur, NULL, &nextInstr); if (target != 0) { targetMD = getTargetMethodDesc((PCODE)target); } } } break; #ifdef TARGET_AMD64 case InstructionType::Branch_IndirectUnconditional: fSawPossibleSwitch = true; break; #endif default: // Clang issues an error saying that some enum values are not handled in the switch, that's intended break; } if (prevDirectCallTargetMD != 0) { ReplaceInstrAfterCall(cur + writeableOffset, prevDirectCallTargetMD); } // For fully interruptible code, we end up whacking every instruction // to INTERRUPT_INSTR. For non-fully interruptible code, we end // up only touching the call instructions (specially so that we // can really do the GC on the instruction just after the call). size_t dwRelOffset = (cur - codeStart) + regionOffsetAdj; _ASSERTE(FitsIn<DWORD>(dwRelOffset)); if (codeMan->IsGcSafe(&codeInfo, static_cast<DWORD>(dwRelOffset))) { *(cur + writeableOffset) = INTERRUPT_INSTR; } #ifdef TARGET_X86 // we will whack every instruction in the prolog and epilog to make certain // our unwinding logic works there. 
if (codeMan->IsInPrologOrEpilog((cur - codeStart) + (DWORD)regionOffsetAdj, gcInfoToken, NULL)) { *(cur + writeableOffset) = INTERRUPT_INSTR; } #endif // If we couldn't find the method desc targetMD is zero prevDirectCallTargetMD = targetMD; cur += len; #ifdef TARGET_AMD64 PBYTE newCur = rangeEnum.EnsureInRange(cur); if(newCur != cur) { prevDirectCallTargetMD = NULL; cur = newCur; fSawPossibleSwitch = false; } #endif } // If we are not able to place an interrupt at the first instruction, this means that // we are partially interruptible with no prolog. Just don't bother to do the // the epilog checks, since the epilog will be trivial (a single return instr) assert(codeSize > 0); if ((regionOffsetAdj==0) && (*codeStart != INTERRUPT_INSTR)) doingEpilogChecks = false; #elif defined(TARGET_ARM) || defined(TARGET_ARM64) //Save the method code from hotRegion memcpy(saveAddr, (BYTE*)methodRegion.hotStartAddress, methodRegion.hotSize); if (methodRegion.coldSize > 0) { //Save the method code from coldRegion memcpy(saveAddr+methodRegion.hotSize, (BYTE*)methodRegion.coldStartAddress, methodRegion.coldSize); } // For prejitted code we have to remove the write-protect on the code page if (fZapped) { DWORD oldProtect; ClrVirtualProtect((BYTE*)methodRegion.hotStartAddress, methodRegion.hotSize, PAGE_EXECUTE_READWRITE, &oldProtect); if (methodRegion.coldSize > 0) { ClrVirtualProtect((BYTE*)methodRegion.coldStartAddress, methodRegion.coldSize, PAGE_EXECUTE_READWRITE, &oldProtect); } } GcInfoDecoder safePointDecoder(gcInfoToken, (GcInfoDecoderFlags)0, 0); assert(methodRegion.hotSize > 0); #ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED safePointDecoder.EnumerateSafePoints(&replaceSafePointInstructionWithGcStressInstr,this); #endif // PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED safePointDecoder.EnumerateInterruptibleRanges(&replaceInterruptibleRangesWithGcStressInstr, this); FlushInstructionCache(GetCurrentProcess(), (BYTE*)methodRegion.hotStartAddress, methodRegion.hotSize); if (methodRegion.coldSize > 0) { FlushInstructionCache(GetCurrentProcess(), (BYTE*)methodRegion.coldStartAddress, methodRegion.coldSize); } #else _ASSERTE(!"not implemented for platform"); #endif // TARGET_X86 } #if defined(TARGET_ARM) || defined(TARGET_ARM64) #ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED void replaceSafePointInstructionWithGcStressInstr(UINT32 safePointOffset, LPVOID pGCCover) { PCODE pCode = NULL; IJitManager::MethodRegionInfo *ptr = &(((GCCoverageInfo*)pGCCover)->methodRegion); //Get code address from offset if (safePointOffset < ptr->hotSize) pCode = ptr->hotStartAddress + safePointOffset; else if(safePointOffset - ptr->hotSize < ptr->coldSize) { SIZE_T coldOffset = safePointOffset - ptr->hotSize; pCode = ptr->coldStartAddress + coldOffset; } else { //For some methods( eg MCCTest.MyClass.GetSum2 in test file jit\jit64\mcc\interop\mcc_i07.il) gcinfo points to a safepoint //beyond the length of the method. So commenting the below assert. //_ASSERTE(safePointOffset - ptr->hotSize < ptr->coldSize); return; } PBYTE instrPtr = (BYTE*)PCODEToPINSTR(pCode); // For code sequences of the type // BL func1 // BL func2 // Safe point 1 // mov r1 r0 // Safe point 2 // Both the above safe points instruction must be replaced with gcStress instruction. // However as the first safe point is already replaced with gcstress instruction, decoding of the call // instruction will fail when processing for the 2nd safe point. Therefore saved instruction must be used instead of // instrPtr for decoding the call instruction. 
PBYTE savedInstrPtr = ((GCCoverageInfo*)pGCCover)->savedCode + safePointOffset; //Determine if instruction before the safe point is call using immediate (BLX Imm) or call by register (BLX Rm) BOOL instructionIsACallThroughRegister = FALSE; BOOL instructionIsACallThroughImmediate = FALSE; #if defined(TARGET_ARM) // POSSIBLE BUG: Note that we are looking backwards by 2 or 4 bytes, looking for particular call instruction encodings. // However, we don't know if the previous instruction is 2 bytes or 4 bytes. Looking back 2 bytes could be looking into // the middle of a 4-byte instruction. The only safe way to do this is by walking forward from the first instruction of // the function. // call by register instruction is two bytes (BL<c> Reg T1 encoding) WORD instr = *((WORD*)savedInstrPtr - 1); instr = instr & 0xff87; if ((instr ^ 0x4780) == 0) { // It is call by register instructionIsACallThroughRegister = TRUE; } else { // call using immediate instructions are 4 bytes (BL<c> <label> T1 encoding) instr = *((WORD*)savedInstrPtr - 2); instr = instr & 0xf800; if ((instr ^ 0xf000) == 0) { if ((*(((WORD*)savedInstrPtr) - 1) & 0xd000) == 0xd000) { // It is call by immediate instructionIsACallThroughImmediate = TRUE; } } } #elif defined(TARGET_ARM64) DWORD instr = *((DWORD*)savedInstrPtr - 1); // Is the call through a register or an immediate offset // BL // Encoding: 0x94000000 & [imm26] if ((instr & 0xFC000000) == 0x94000000) { instructionIsACallThroughImmediate = TRUE; } // BLR // Encoding: 0xD63F0000 & (Rn<<5) else if ((instr & 0xFFFFFC1F) == 0xD63F0000) { instructionIsACallThroughRegister = TRUE; } #endif // _TARGET_XXXX_ // safe point must always be after a call instruction // and cannot be both call by register & immediate // The safe points are also marked at jump calls( a special variant of // tail call). However that call site will never appear on the stack. // So commenting the assert for now. As for such places the previous // instruction will not be a call instruction. //_ASSERTE(instructionIsACallThroughRegister ^ instructionIsACallThroughImmediate); #if defined(TARGET_ARM) size_t instrLen = sizeof(WORD); #else size_t instrLen = sizeof(DWORD); #endif ExecutableWriterHolder<BYTE> instrPtrWriterHolder(instrPtr - instrLen, 2 * instrLen); if(instructionIsACallThroughRegister) { // If it is call by register then cannot know MethodDesc so replace the call instruction with illegal instruction // safe point will be replaced with appropriate illegal instruction at execution time when reg value is known #if defined(TARGET_ARM) *((WORD*)instrPtrWriterHolder.GetRW()) = INTERRUPT_INSTR_CALL; #elif defined(TARGET_ARM64) *((DWORD*)instrPtrWriterHolder.GetRW()) = INTERRUPT_INSTR_CALL; #endif // _TARGET_XXXX_ } else if(instructionIsACallThroughImmediate) { // If it is call by immediate then find the methodDesc PBYTE nextInstr; PBYTE target = getTargetOfCall((PBYTE)((WORD*)savedInstrPtr-2), NULL, &nextInstr); if (target != 0) { //Target is calculated wrt the saved instruction pointer //Find the real target wrt the real instruction pointer int delta = static_cast<int>(target - savedInstrPtr); target = delta + instrPtr; MethodDesc* targetMD = getTargetMethodDesc((PCODE)target); if (targetMD != 0) { // The instruction about to be replaced cannot already be a gcstress instruction _ASSERTE(!IsGcCoverageInterruptInstruction(instrPtr)); // // When applying GC coverage breakpoints at native image load time, the code here runs // before eager fixups are applied for the module being loaded. 
The direct call target // never requires restore, however it is possible that it is initially in an invalid state // and remains invalid until one or more eager fixups are applied. // // ReplaceInstrAfterCall consults the method signature, meaning it consults the // metadata in the owning module. For generic instantiations stored in non-preferred // modules, reaching the owning module requires following the module override pointer for // the enclosing MethodTable. In this case, the module override pointer is generally // invalid until an associated eager fixup is applied. // // In situations like this, ReplaceInstrAfterCall will try to dereference an // unresolved fixup and will AV. // // Given all of this, skip the ReplaceInstrAfterCall call by default to avoid // unexpected AVs. This implies leaving out the GC coverage breakpoints for direct calls // unless COMPlus_GcStressOnDirectCalls=1 is explicitly set in the environment. // static ConfigDWORD fGcStressOnDirectCalls; if (fGcStressOnDirectCalls.val(CLRConfig::INTERNAL_GcStressOnDirectCalls)) { ReplaceInstrAfterCall(instrPtrWriterHolder.GetRW() + instrLen, targetMD); } } } } } #endif // PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED //Replaces the provided interruptible range with corresponding 2 or 4 byte gcStress illegal instruction bool replaceInterruptibleRangesWithGcStressInstr (UINT32 startOffset, UINT32 stopOffset, LPVOID pGCCover) { PCODE pCode = NULL; PBYTE rangeStart = NULL; PBYTE rangeStop = NULL; //Interruptible range can span across hot & cold region int acrossHotRegion = 1; // 1 means range is not across end of hot region & 2 is when it is across end of hot region //Find the code addresses from offsets IJitManager::MethodRegionInfo *ptr = &(((GCCoverageInfo*)pGCCover)->methodRegion); if (startOffset < ptr->hotSize) { pCode = ptr->hotStartAddress + startOffset; rangeStart = (BYTE*)PCODEToPINSTR(pCode); if(stopOffset <= ptr->hotSize) { pCode = ptr->hotStartAddress + stopOffset; rangeStop = (BYTE*)PCODEToPINSTR(pCode); } else { //Interruptible range is spanning across hot & cold region pCode = ptr->hotStartAddress + ptr->hotSize; rangeStop = (BYTE*)PCODEToPINSTR(pCode); acrossHotRegion++; } } else { SIZE_T coldOffset = startOffset - ptr->hotSize; _ASSERTE(coldOffset < ptr->coldSize); pCode = ptr->coldStartAddress + coldOffset; rangeStart = (BYTE*)PCODEToPINSTR(pCode); coldOffset = stopOffset - ptr->hotSize; _ASSERTE(coldOffset <= ptr->coldSize); pCode = ptr->coldStartAddress + coldOffset; rangeStop = (BYTE*)PCODEToPINSTR(pCode); } // Need to do two iterations if interruptible range spans across hot & cold region while(acrossHotRegion--) { ExecutableWriterHolder<BYTE> instrPtrWriterHolder(rangeStart, rangeStop - rangeStart); PBYTE instrPtrRW = instrPtrWriterHolder.GetRW(); PBYTE rangeStopRW = instrPtrRW + (rangeStop - rangeStart); while(instrPtrRW < rangeStopRW) { // The instruction about to be replaced cannot already be a gcstress instruction _ASSERTE(!IsGcCoverageInterruptInstruction(instrPtrRW)); #if defined(TARGET_ARM) size_t instrLen = GetARMInstructionLength(instrPtrRW); if (instrLen == 2) *((WORD*)instrPtrRW) = INTERRUPT_INSTR; else { *((DWORD*)instrPtrRW) = INTERRUPT_INSTR_32; } instrPtrRW += instrLen; #elif defined(TARGET_ARM64) *((DWORD*)instrPtrRW) = INTERRUPT_INSTR; instrPtrRW += 4; #endif // TARGET_XXXX_ } if(acrossHotRegion) { //Set rangeStart & rangeStop for the second iteration _ASSERTE(acrossHotRegion==1); rangeStart = (BYTE*)PCODEToPINSTR(ptr->coldStartAddress); pCode = ptr->coldStartAddress + stopOffset - 
ptr->hotSize; rangeStop = (BYTE*)PCODEToPINSTR(pCode); } } return FALSE; } #endif // defined(TARGET_ARM) || defined(TARGET_ARM64) static size_t getRegVal(unsigned regNum, PCONTEXT regs) { return *getRegAddr(regNum, regs); } /****************************************************************************/ static PBYTE getTargetOfCall(PBYTE instrPtr, PCONTEXT regs, PBYTE* nextInstr) { BYTE sibindexadj = 0; BYTE baseadj = 0; WORD displace = 0; // In certain situations, the instruction bytes are read from a different // location than the actual bytes being executed. // When decoding the instructions of a method which is sprinkled with // TRAP instructions for GCStress, we decode the bytes from a copy // of the instructions stored before the traps-for-gc were inserted. // However, the PC-relative addressing/displacement of the CALL-target // will still be with respect to the currently executing PC. // So, if a register context is available, we pick the PC from it // (for address calculation purposes only). PBYTE PC = (regs) ? (PBYTE)GetIP(regs) : instrPtr; #ifdef TARGET_ARM if((instrPtr[1] & 0xf0) == 0xf0) // direct call { int imm32 = GetThumb2BlRel24((UINT16 *)instrPtr); *nextInstr = instrPtr + 4; return PC + 4 + imm32; } else if(((instrPtr[1] & 0x47) == 0x47) & ((instrPtr[0] & 0x80) == 0x80)) // indirect call { *nextInstr = instrPtr + 2; unsigned int regnum = (instrPtr[0] & 0x78) >> 3; return (BYTE *)getRegVal(regnum, regs); } else { return 0; // Not a call. } #elif defined(TARGET_ARM64) if (((*reinterpret_cast<DWORD*>(instrPtr)) & 0xFC000000) == 0x94000000) { // call through immediate int imm26 = ((*((DWORD*)instrPtr)) & 0x03FFFFFF)<<2; // SignExtend the immediate value. imm26 = (imm26 << 4) >> 4; *nextInstr = instrPtr + 4; return PC + imm26; } else if (((*reinterpret_cast<DWORD*>(instrPtr)) & 0xFFFFC1F) == 0xD63F0000) { // call through register *nextInstr = instrPtr + 4; unsigned int regnum = ((*(DWORD*)instrPtr) >> 5) & 0x1F; return (BYTE *)getRegVal(regnum, regs); } else { return 0; // Fail } #endif #ifdef TARGET_AMD64 if ((instrPtr[0] & 0xf0) == REX_PREFIX_BASE) { static_assert_no_msg(REX_SIB_BASE_EXT == REX_MODRM_RM_EXT); if (instrPtr[0] & REX_SIB_BASE_EXT) baseadj = 8; if (instrPtr[0] & REX_SIB_INDEX_EXT) sibindexadj = 8; instrPtr++; } #endif // TARGET_AMD64 if (instrPtr[0] == 0xE8) { // Direct Relative Near *nextInstr = instrPtr + 5; size_t base = (size_t) PC + 5; INT32 displacement = (INT32) ( ((UINT32)instrPtr[1]) + (((UINT32)instrPtr[2]) << 8) + (((UINT32)instrPtr[3]) << 16) + (((UINT32)instrPtr[4]) << 24) ); // Note that the signed displacement is sign-extended // to 64-bit on AMD64 return((PBYTE)(base + (SSIZE_T)displacement)); } if (instrPtr[0] == 0xFF) { // Indirect Absolute Near _ASSERTE(regs); BYTE mod = (instrPtr[1] & 0xC0) >> 6; BYTE rm = (instrPtr[1] & 0x7); PBYTE result; switch (mod) { case 0: case 1: case 2: if (rm == 4) { // // Get values from the SIB byte // BYTE ss = (instrPtr[2] & 0xC0) >> 6; BYTE index = (instrPtr[2] & 0x38) >> 3; BYTE base = (instrPtr[2] & 0x7); // // Get starting value // if ((mod == 0) && (base == 5)) { result = 0; } else { result = (BYTE *)getRegVal(baseadj + base, regs); } // // Add in the [index] // if (index != 0x4) { result = result + (getRegVal(sibindexadj + index, regs) << ss); } // // Finally add in the offset // if (mod == 0) { if (base == 5) { result = result + *((int *)&instrPtr[3]); displace += 7; } else { displace += 3; } } else if (mod == 1) { result = result + *((char *)&instrPtr[3]); displace += 4; } else { // == 2 result = result + 
*((int *)&instrPtr[3]); displace += 7; } } else { // // Get the value we need from the register. // if ((mod == 0) && (rm == 5)) { #ifdef TARGET_AMD64 // at this point instrPtr should be pointing at the beginning // of the byte sequence for the call instruction. the operand // is a RIP-relative address from the next instruction, so to // calculate the address of the next instruction we need to // jump forward 6 bytes: 1 for the opcode, 1 for the ModRM byte, // and 4 for the operand. see AMD64 Programmer's Manual Vol 3. result = PC + 6; #else result = 0; #endif // TARGET_AMD64 } else { result = (PBYTE)getRegVal(baseadj + rm, regs); } if (mod == 0) { if (rm == 5) { result = result + *((int *)&instrPtr[2]); displace += 6; } else { displace += 2; } } else if (mod == 1) { result = result + *((char *)&instrPtr[2]); displace += 3; } else { // == 2 result = result + *((int *)&instrPtr[2]); displace += 6; } } // // Now dereference thru the result to get the resulting IP. // result = (PBYTE)(*((PBYTE *)result)); break; case 3: default: result = (PBYTE)getRegVal(baseadj + rm, regs); displace += 2; break; } *nextInstr = instrPtr + displace; return result; } return(0); // Fail } /****************************************************************************/ #ifdef TARGET_X86 void checkAndUpdateReg(DWORD& origVal, DWORD curVal, bool gcHappened) { if (origVal == curVal) return; // If these asserts go off, they indicate either that unwinding out of a epilog is wrong or that // the validation infrastructure has got a bug. _ASSERTE(gcHappened); // If the register values are different, a GC must have happened _ASSERTE(GCHeapUtilities::GetGCHeap()->IsHeapPointer((BYTE*) size_t(origVal))); // And the pointers involved are on the GCHeap _ASSERTE(GCHeapUtilities::GetGCHeap()->IsHeapPointer((BYTE*) size_t(curVal))); origVal = curVal; // this is now the best estimate of what should be returned. } #endif // TARGET_X86 int GCcoverCount = 0; void* forceStack[8]; /****************************************************************************/ bool IsGcCoverageInterrupt(LPVOID ip) { // Determine if the IP is valid for a GC marker first, before trying to dereference it to check the instruction EECodeInfo codeInfo(reinterpret_cast<PCODE>(ip)); if (!codeInfo.IsValid()) { return false; } NativeCodeVersion nativeCodeVersion = codeInfo.GetNativeCodeVersion(); _ASSERTE(!nativeCodeVersion.IsNull()); GCCoverageInfo *gcCover = nativeCodeVersion.GetGCCoverageInfo(); if (gcCover == nullptr) { return false; } PBYTE instrPtr = reinterpret_cast<PBYTE>(ip); if (IsGcCoverageInterruptInstruction(instrPtr)) { return true; } if (IsOriginalInstruction(instrPtr, gcCover, codeInfo.GetRelOffset())) { // Another thread may have already changed the code back to the original. return true; } return false; } // Remove the GcCoverage interrupt instruction, and restore the // original instruction. Only one instruction must be used, // because multiple threads can be executing the same code stream. 
void RemoveGcCoverageInterrupt(TADDR instrPtr, BYTE * savedInstrPtr, GCCoverageInfo* gcCover, DWORD offset) { ExecutableWriterHolder<void> instrPtrWriterHolder((void*)instrPtr, 4); #ifdef TARGET_ARM if (GetARMInstructionLength(savedInstrPtr) == 2) *(WORD *)instrPtrWriterHolder.GetRW() = *(WORD *)savedInstrPtr; else *(DWORD *)instrPtrWriterHolder.GetRW() = *(DWORD *)savedInstrPtr; #elif defined(TARGET_ARM64) *(DWORD *)instrPtrWriterHolder.GetRW() = *(DWORD *)savedInstrPtr; #else *(BYTE *)instrPtrWriterHolder.GetRW() = *savedInstrPtr; #endif #ifdef TARGET_X86 // Epilog checking relies on precise control of when instrumentation for the first prolog // instruction is enabled or disabled. In particular, if a function has multiple epilogs, or // the first execution of the function terminates via an exception, and subsequent completions // do not, then the function may trigger a false stress fault if epilog checks are not disabled. if (offset == 0) { gcCover->doingEpilogChecks = false; } #endif // TARGET_X86 FlushInstructionCache(GetCurrentProcess(), (LPCVOID)instrPtr, 4); } // A managed thread (T) can race with the GC as follows: // 1) At the first safepoint, we notice that T is in preemptive mode during the call for GCStress // So, it is put it in cooperative mode for the purpose of GCStress(fPreemptiveGcDisabledForGcStress) // 2) We DoGCStress(). Start off background GC in a different thread. // 3) Then the thread T is put back to preemptive mode (because that's where it was). // Thread T continues execution along with the GC thread. // 4) The Jitted code puts thread T to cooperative mode, as part of PInvoke epilog // 5) Now instead of CORINFO_HELP_STOP_FOR_GC(), we hit the GCStress trap and start // another round of GCStress while in Cooperative mode. // 6) Now, thread T can modify the stack (ex: RedirectionFrame setup) while the GC thread is scanning it. // // This race is now mitigated below. Where we won't initiate a stress mode GC // for a thread in cooperative mode with an active ICF, if g_TrapReturningThreads is true. BOOL OnGcCoverageInterrupt(PCONTEXT regs) { // So that you can set counted breakpoint easily; GCcoverCount++; forceStack[0]= &regs; // This is so I can see it fastchecked PCODE controlPc = GetIP(regs); TADDR instrPtr = PCODEToPINSTR(controlPc); forceStack[0] = &instrPtr; // This is so I can see it fastchecked EECodeInfo codeInfo(controlPc); if (!codeInfo.IsValid()) return(FALSE); MethodDesc* pMD = codeInfo.GetMethodDesc(); DWORD offset = codeInfo.GetRelOffset(); forceStack[1] = &pMD; // This is so I can see it fastchecked forceStack[2] = &offset; // This is so I can see it fastchecked NativeCodeVersion nativeCodeVersion = codeInfo.GetNativeCodeVersion(); _ASSERTE(!nativeCodeVersion.IsNull()); GCCoverageInfo* gcCover = nativeCodeVersion.GetGCCoverageInfo(); forceStack[3] = &gcCover; // This is so I can see it fastchecked if (gcCover == 0) return(FALSE); // we aren't doing code gcCoverage on this function BYTE * savedInstrPtr = &gcCover->savedCode[offset]; Thread* pThread = GetThreadNULLOk(); if (!pThread) { // No thread at the moment so we aren't doing coverage for this function. // This should only occur for methods with the UnmanagedCallersOnlyAttribute, // where the call could be coming from a thread unknown to the CLR and // we haven't created a thread yet - see PreStubWorker_Preemptive(). 
_ASSERTE(pMD->HasUnmanagedCallersOnlyAttribute()); RemoveGcCoverageInterrupt(instrPtr, savedInstrPtr, gcCover, offset); return TRUE; } // If the thread is in preemptive mode then we must be in a // PInvoke stub, a method that has an inline PInvoke frame, // or be in a reverse PInvoke stub that's about to return. // // The PInvoke cases should properly report GC refs if we // trigger GC here. But a reverse PInvoke stub may over-report // leading to spurious failures, as we would not normally report // anything for this method at this point. if (!pThread->PreemptiveGCDisabled() && pMD->HasUnmanagedCallersOnlyAttribute()) { RemoveGcCoverageInterrupt(instrPtr, savedInstrPtr, gcCover, offset); return TRUE; } // If we're in cooperative mode, we're supposed to stop for GC, // and there's an active ICF, don't initiate a stress GC. if (g_TrapReturningThreads && pThread->PreemptiveGCDisabled()) { Frame* pFrame = pThread->GetFrame(); if (InlinedCallFrame::FrameHasActiveCall(pFrame)) { RemoveGcCoverageInterrupt(instrPtr, savedInstrPtr, gcCover, offset); return TRUE; } } #if defined(USE_REDIRECT_FOR_GCSTRESS) && !defined(TARGET_UNIX) // If we're unable to redirect, then we simply won't test GC at this // location. if (!pThread->CheckForAndDoRedirectForGCStress(regs)) { RemoveGcCoverageInterrupt(instrPtr, savedInstrPtr, gcCover, offset); } #else // !USE_REDIRECT_FOR_GCSTRESS #ifdef _DEBUG if (!g_pConfig->SkipGCCoverage(pMD->GetModule()->GetSimpleName())) #endif DoGcStress(regs, codeInfo.GetNativeCodeVersion()); #endif // !USE_REDIRECT_FOR_GCSTRESS return TRUE; } // There are some code paths in DoGcStress that return without doing a GC, but we // now rely on EE suspension to update the GC stress instruction. // We need to do an extra EE suspension/resume even without a GC. FORCEINLINE void UpdateGCStressInstructionWithoutGC () { ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_OTHER); ThreadSuspend::RestartEE(TRUE, TRUE); } /****************************************************************************/ void DoGcStress (PCONTEXT regs, NativeCodeVersion nativeCodeVersion) { PCODE controlPc = GetIP(regs); PBYTE instrPtr = reinterpret_cast<PBYTE>(PCODEToPINSTR(controlPc)); if (nativeCodeVersion.IsNull()) { nativeCodeVersion = ExecutionManager::GetNativeCodeVersion(controlPc); if (nativeCodeVersion.IsNull()) return; } GCCoverageInfo *gcCover = nativeCodeVersion.GetGCCoverageInfo(); EECodeInfo codeInfo(controlPc); _ASSERTE(codeInfo.GetNativeCodeVersion() == nativeCodeVersion); DWORD offset = codeInfo.GetRelOffset(); Thread *pThread = GetThread(); // There is a race condition with the computation of `atCall`. Multiple threads could enter // this function (DoGcStress) at the same time. If one reads `*instrPtr` and sets `atCall` // to `true`, it will proceed, lower down in this function, to call `pThread->CommitGCStressInstructionUpdate()` // to replace the GCStress instruction at the call back to the original call instruction. // Other threads could then read `*instrPtr` and see the actual call instruction instead of the // call-specific GCStress instruction (INTERRUPT_INSTR_CALL[_32]). If `atCall` is set to false as // a result, then we'll do a GCStress as if this is a fully-interruptible code site, which it isn't, // which can lead to asserts (or, presumably, other failures). So, we have to check // `if (!IsGcCoverageInterruptInstruction(instrPtr))` after we read `*instrPtr`. 
bool atCall; bool afterCallProtect[2] = { false, false }; #if defined(TARGET_X86) || defined(TARGET_AMD64) BYTE instrVal = *instrPtr; forceStack[6] = &instrVal; // This is so I can see it fastchecked atCall = (instrVal == INTERRUPT_INSTR_CALL); if (instrVal == INTERRUPT_INSTR_PROTECT_BOTH_RET) { afterCallProtect[0] = afterCallProtect[1] = true; } else if (instrVal == INTERRUPT_INSTR_PROTECT_FIRST_RET) { afterCallProtect[0] = true; } else if (instrVal == INTERRUPT_INSTR_PROTECT_SECOND_RET) { afterCallProtect[1] = true; } #elif defined(TARGET_ARM) forceStack[6] = (WORD*)instrPtr; // This is so I can see it fastchecked size_t instrLen = GetARMInstructionLength(instrPtr); if (instrLen == 2) { WORD instrVal = *(WORD*)instrPtr; atCall = (instrVal == INTERRUPT_INSTR_CALL); afterCallProtect[0] = (instrVal == INTERRUPT_INSTR_PROTECT_RET); } else { _ASSERTE(instrLen == 4); DWORD instrVal32 = *(DWORD*)instrPtr; atCall = (instrVal32 == INTERRUPT_INSTR_CALL_32); afterCallProtect[0] = (instrVal32 == INTERRUPT_INSTR_PROTECT_RET_32); } #elif defined(TARGET_ARM64) DWORD instrVal = *(DWORD *)instrPtr; forceStack[6] = &instrVal; // This is so I can see it fastchecked atCall = (instrVal == INTERRUPT_INSTR_CALL); afterCallProtect[0] = (instrVal == INTERRUPT_INSTR_PROTECT_RET); #endif // _TARGET_* if (!IsGcCoverageInterruptInstruction(instrPtr)) { // This assert can fail if another thread changed original instruction to // GCCoverage Interrupt instruction between these two commands. Uncomment it // when threading issue gets resolved. // _ASSERTE(IsOriginalInstruction(instrPtr, gcCover, offset)); // Someone beat us to it, just go on running. return; } #ifdef TARGET_X86 /* are we at the very first instruction? If so, capture the register state */ bool bShouldUpdateProlog = true; if (gcCover->doingEpilogChecks) { if (offset == 0) { if ((gcCover->callerThread == 0) && (FastInterlockCompareExchangePointer(&gcCover->callerThread, pThread, 0) == 0)) { gcCover->callerRegs = *regs; gcCover->gcCount = GCHeapUtilities::GetGCHeap()->GetGcCount(); bShouldUpdateProlog = false; } else { // We have been in this routine before. Give up on epilog checking because // it is hard to insure that the saved caller register state is correct // This also has the effect of only doing the checking once per routine // (Even if there are multiple epilogs) gcCover->doingEpilogChecks = false; } } // If some other thread removes interrupt points, we abandon epilog testing // for this routine since the barrier at the beginning of the routine may not // be up anymore, and thus the caller context is now not guaranteed to be correct. // This should happen only very rarely so is not a big deal. if (gcCover->callerThread != pThread) gcCover->doingEpilogChecks = false; } instrVal = gcCover->savedCode[offset]; #endif // TARGET_X86 // <GCStress instruction update race> // Remove the interrupt instruction the next time we suspend the EE, // which should happen below in the call to StressHeap(). This is // done with the EE suspended so that we do not race with the executing // code on some other thread. If we allow that race, we may sometimes // get a STATUS_ACCESS_VIOLATION instead of the expected // STATUS_PRIVILEGED_INSTRUCTION because the OS has to inspect the code // stream to determine which exception code to raise. As a result, some // thread may take the exception due to the HLT, but by the time the OS // inspects the code stream, the HLT may be replaced with the original // code and it will just raise a STATUS_ACCESS_VIOLATION. 
#ifdef TARGET_X86 // only restore the original instruction if: // this is not the first instruction in the method's prolog, or // if it is, only if this is the second time we run in this method // note that if this is the second time in the prolog we've already disabled epilog checks if (offset != 0 || bShouldUpdateProlog) #endif pThread->PostGCStressInstructionUpdate((BYTE*)instrPtr, &gcCover->savedCode[offset]); #ifdef TARGET_X86 /* are we in a prolog or epilog? If so just test the unwind logic but don't actually do a GC since the prolog and epilog are not GC safe points */ if (gcCover->codeMan->IsInPrologOrEpilog(offset, gcCover->gcInfoToken, NULL)) { // We are not at a GC safe point so we can't Suspend EE (Suspend EE will yield to GC). // But we still have to update the GC Stress instruction. We do it directly without suspending // other threads, which means a race on updating is still possible. But for X86 the window of // race is so small that we could ignore it. We need a better solution if the race becomes a real problem. // see details about <GCStress instruction update race> in comments above pThread->CommitGCStressInstructionUpdate (); REGDISPLAY regDisp; CONTEXT copyRegs = *regs; pThread->Thread::InitRegDisplay(&regDisp, &copyRegs, true); pThread->UnhijackThread(); CodeManState codeManState; codeManState.dwIsSet = 0; // unwind out of the prolog or epilog gcCover->codeMan->UnwindStackFrame(&regDisp, &codeInfo, UpdateAllRegs, &codeManState, NULL); // Note we always doing the unwind, since that at does some checking (that we // unwind to a valid return address), but we only do the precise checking when // we are certain we have a good caller state if (gcCover->doingEpilogChecks) { // Confirm that we recovered our register state properly _ASSERTE(regDisp.PCTAddr == TADDR(gcCover->callerRegs.Esp)); // If a GC happened in this function, then the registers will not match // precisely. However there is still checks we can do. Also we can update // the saved register to its new value so that if a GC does not happen between // instructions we can recover (and since GCs are not allowed in the // prologs and epilogs, we get get complete coverage except for the first // instruction in the epilog (TODO: fix it for the first instr Case) _ASSERTE(pThread->PreemptiveGCDisabled()); // Epilogs should be in cooperative mode, no GC can happen right now. bool gcHappened = gcCover->gcCount != GCHeapUtilities::GetGCHeap()->GetGcCount(); checkAndUpdateReg(gcCover->callerRegs.Edi, *regDisp.GetEdiLocation(), gcHappened); checkAndUpdateReg(gcCover->callerRegs.Esi, *regDisp.GetEsiLocation(), gcHappened); checkAndUpdateReg(gcCover->callerRegs.Ebx, *regDisp.GetEbxLocation(), gcHappened); checkAndUpdateReg(gcCover->callerRegs.Ebp, *regDisp.GetEbpLocation(), gcHappened); gcCover->gcCount = GCHeapUtilities::GetGCHeap()->GetGcCount(); } return; } #endif // TARGET_X86 #if defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM) || defined(TARGET_ARM64) /* In non-fully interruptible code, if the EIP is just after a call instr means something different because it expects that we are IN the called method, not actually at the instruction just after the call. This is important, because until the called method returns, IT is responsible for protecting the return value. Thus just after a call instruction we have to protect EAX if the method being called returns a GC pointer. 
To figure this out, we need to stop AT the call so we can determine the target (and thus whether it returns one or more GC pointers), and then place a different interrupt instruction so that the GCCover harness protects the return value register(s) before doing the GC. This effectively simulates a hijack in non-fully interruptible code */ /* TODO. Simulating the hijack could cause problems in cases where the return register is not always a valid GC ref on the return offset. That could happen if we got to the return offset via a branch and not via return from the preceding call. However, this has not been an issue so far. Example: mov eax, someval test eax, eax jCC AFTERCALL call MethodWhichReturnsGCobject // return value is not used AFTERCALL: */ if (atCall) { // We need to update the GC Stress instruction. With partially-interruptible code // the call instruction is not a GC safe point so we can't use // StressHeap or UpdateGCStressInstructionWithoutGC to take care of updating; // So we just update the instruction directly. There are still chances for a race, // but it's not been a problem so far. // see details about <GCStress instruction update race> in comments above pThread->CommitGCStressInstructionUpdate (); PBYTE nextInstr; PBYTE target = getTargetOfCall((BYTE*) instrPtr, regs, (BYTE**)&nextInstr); if (target != 0) { ExecutableWriterHolder<BYTE> nextInstrWriterHolder(nextInstr, sizeof(DWORD)); if (!pThread->PreemptiveGCDisabled()) { // We are in preemptive mode in JITTed code. This implies that we are into IL stub // close to PINVOKE method. This call will never return objectrefs. #ifdef TARGET_ARM size_t instrLen = GetARMInstructionLength(nextInstr); if (instrLen == 2) *(WORD*)nextInstrWriterHolder.GetRW() = INTERRUPT_INSTR; else *(DWORD*)nextInstrWriterHolder.GetRW() = INTERRUPT_INSTR_32; #elif defined(TARGET_ARM64) *(DWORD*)nextInstrWriterHolder.GetRW() = INTERRUPT_INSTR; #else *nextInstrWriterHolder.GetRW() = INTERRUPT_INSTR; #endif } else { MethodDesc* targetMD = getTargetMethodDesc((PCODE)target); if (targetMD != 0) { // @Todo: possible race here, might need to be fixed if it become a problem. // It could become a problem if 64bit does partially interrupt work. // OK, we have the MD, mark the instruction after the CALL // appropriately ReplaceInstrAfterCall(nextInstrWriterHolder.GetRW(), targetMD); } } } // Must flush instruction cache before returning as instruction has been modified. // Note this needs to reach beyond the call by up to 4 bytes. FlushInstructionCache(GetCurrentProcess(), (LPCVOID)instrPtr, 10); // It's not GC safe point, the GC Stress instruction is // already committed and interrupt is already put at next instruction so we just return. return; } #else PORTABILITY_ASSERT("DoGcStress - NYI on this platform"); #endif // _TARGET_* bool enableWhenDone = false; if (!pThread->PreemptiveGCDisabled()) { pThread->DisablePreemptiveGC(); enableWhenDone = true; } #if 0 // TODO currently disabled. we only do a GC once per instruction location. 
/* note that for multiple threads, we can lose track and forget to reset the interrupt after we execute an instruction, so some instruction points will not be executed twice, but we still get very good coverage (perfect for single threaded cases) */ /* if we have not run this instruction in the past */ /* remember to whack it to an INTERRUPT_INSTR again */ if (!gcCover->IsBitSetForOffset(offset)) { // gcCover->curInstr = instrPtr; gcCover->SetBitForOffset(offset); } #endif // 0 #if !defined(USE_REDIRECT_FOR_GCSTRESS) // // If we redirect for gc stress, we don't need this frame on the stack, // the redirection will push a resumable frame. // FrameWithCookie<ResumableFrame> frame(regs); frame.Push(pThread); #endif // USE_REDIRECT_FOR_GCSTRESS DWORD_PTR retValRegs[2] = { 0 }; UINT numberOfRegs = 0; if (afterCallProtect[0]) { #if defined(TARGET_AMD64) retValRegs[numberOfRegs++] = regs->Rax; #elif defined(TARGET_X86) retValRegs[numberOfRegs++] = regs->Eax; #elif defined(TARGET_ARM) retValRegs[numberOfRegs++] = regs->R0; #elif defined(TARGET_ARM64) retValRegs[numberOfRegs++] = regs->X0; #endif // TARGET_ARM64 } if (afterCallProtect[1]) { #if defined(TARGET_AMD64) && defined(TARGET_UNIX) retValRegs[numberOfRegs++] = regs->Rdx; #else // !TARGET_AMD64 || !TARGET_UNIX _ASSERTE(!"Not expected multi reg return with pointers."); #endif // !TARGET_AMD64 || !TARGET_UNIX } _ASSERTE(sizeof(OBJECTREF) == sizeof(DWORD_PTR)); GCFrame gcFrame(pThread, (OBJECTREF*)retValRegs, numberOfRegs, TRUE); MethodDesc *pMD = nativeCodeVersion.GetMethodDesc(); LOG((LF_GCROOTS, LL_EVERYTHING, "GCCOVER: Doing GC at method %s::%s offset 0x%x\n", pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName, offset)); //------------------------------------------------------------------------- // Do the actual stress work // // BUG(github #10318) - when not using allocation contexts, the alloc lock // must be acquired here. Until fixed, this assert prevents random heap corruption. assert(GCHeapUtilities::UseThreadAllocationContexts()); GCHeapUtilities::GetGCHeap()->StressHeap(GetThread()->GetAllocContext()); // StressHeap can exit early w/o forcing a SuspendEE to trigger the instruction update // We cannot rely on the return code to determine if the instruction update happened // Use HasPendingGCStressInstructionUpdate() to be certain. if(pThread->HasPendingGCStressInstructionUpdate()) UpdateGCStressInstructionWithoutGC (); // Must flush instruction cache before returning as instruction has been modified. FlushInstructionCache(GetCurrentProcess(), (LPCVOID)instrPtr, 4); CONSISTENCY_CHECK(!pThread->HasPendingGCStressInstructionUpdate()); if (numberOfRegs != 0) { if (afterCallProtect[0]) { #if defined(TARGET_AMD64) regs->Rax = retValRegs[0]; #elif defined(TARGET_X86) regs->Eax = retValRegs[0]; #elif defined(TARGET_ARM) regs->R0 = retValRegs[0]; #elif defined(TARGET_ARM64) regs->X[0] = retValRegs[0]; #else PORTABILITY_ASSERT("DoGCStress - return register"); #endif } if (afterCallProtect[1]) { #if defined(TARGET_AMD64) && defined(TARGET_UNIX) regs->Rdx = retValRegs[numberOfRegs - 1]; #else // !TARGET_AMD64 || !TARGET_UNIX _ASSERTE(!"Not expected multi reg return with pointers."); #endif // !TARGET_AMD64 || !TARGET_UNIX } } #if !defined(USE_REDIRECT_FOR_GCSTRESS) frame.Pop(pThread); #endif // USE_REDIRECT_FOR_GCSTRESS if (enableWhenDone) { BOOL b = GC_ON_TRANSITIONS(FALSE); // Don't do a GCStress 3 GC here pThread->EnablePreemptiveGC(); GC_ON_TRANSITIONS(b); } return; } #endif // HAVE_GCCOVER
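// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original gccover.cpp): a minimal,
// standalone restatement of what getTargetOfCall does for the direct relative
// near call (opcode 0xE8) on x86/AMD64. The function name is hypothetical.
// The instruction bytes may come from the saved copy of the code (because the
// live code stream has been whacked with trap instructions), while the
// PC-relative displacement is applied to the executing PC: the target is the
// address of the next instruction (PC + 5) plus the signed, little-endian
// 32-bit displacement that follows the opcode (sign-extended on AMD64).
#if 0 // sketch only; never compiled
static PBYTE SketchDecodeDirectCallTarget(PBYTE instrBytes, PBYTE executingPC)
{
    _ASSERTE(instrBytes[0] == 0xE8);                      // direct relative near call
    INT32 disp = (INT32)(((UINT32)instrBytes[1]) |
                         (((UINT32)instrBytes[2]) << 8) |
                         (((UINT32)instrBytes[3]) << 16) |
                         (((UINT32)instrBytes[4]) << 24)); // rel32, little-endian
    return (PBYTE)((size_t)executingPC + 5 + (SSIZE_T)disp);
}
#endif
// ---------------------------------------------------------------------------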
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************/ /* gccover.cpp */ /****************************************************************************/ /* This file holds code that is designed to test GC pointer tracking in fully interruptible code. We basically do a GC everywhere we can in jitted code */ /****************************************************************************/ #include "common.h" #ifdef HAVE_GCCOVER #pragma warning(disable:4663) #include "eeconfig.h" #include "gms.h" #include "utsem.h" #include "gccover.h" #include "virtualcallstub.h" #include "threadsuspend.h" #if defined(TARGET_AMD64) || defined(TARGET_ARM) #include "gcinfodecoder.h" #endif #include "disassembler.h" /****************************************************************************/ MethodDesc* AsMethodDesc(size_t addr); static PBYTE getTargetOfCall(PBYTE instrPtr, PCONTEXT regs, PBYTE*nextInstr); #if defined(TARGET_ARM) || defined(TARGET_ARM64) static void replaceSafePointInstructionWithGcStressInstr(UINT32 safePointOffset, LPVOID codeStart); static bool replaceInterruptibleRangesWithGcStressInstr (UINT32 startOffset, UINT32 stopOffset, LPVOID codeStart); #endif // There is a call target instruction, try to find the MethodDesc for where target points to. // Returns nullptr if it can't find it. static MethodDesc* getTargetMethodDesc(PCODE target) { MethodDesc* targetMD = ExecutionManager::GetCodeMethodDesc(target); if (targetMD != nullptr) { // It is JIT/NGened call. return targetMD; } VirtualCallStubManager::StubKind vsdStubKind = VirtualCallStubManager::SK_UNKNOWN; VirtualCallStubManager *pVSDStubManager = VirtualCallStubManager::FindStubManager(target, &vsdStubKind); if (vsdStubKind != VirtualCallStubManager::SK_BREAKPOINT && vsdStubKind != VirtualCallStubManager::SK_UNKNOWN) { // It is a VSD stub manager. DispatchToken token(VirtualCallStubManager::GetTokenFromStubQuick(pVSDStubManager, target, vsdStubKind)); _ASSERTE(token.IsValid()); return VirtualCallStubManager::GetInterfaceMethodDescFromToken(token); } if (RangeSectionStubManager::GetStubKind(target) == STUB_CODE_BLOCK_PRECODE) { // The address looks like a value stub, try to get the method descriptor. 
return MethodDesc::GetMethodDescFromStubAddr(target, TRUE); } return nullptr; } bool IsGcCoverageInterruptInstruction(PBYTE instrPtr) { UINT32 instrVal; #if defined(TARGET_ARM64) instrVal = *reinterpret_cast<UINT32*>(instrPtr); #elif defined(TARGET_ARM) size_t instrLen = GetARMInstructionLength(instrPtr); if (instrLen == 2) { instrVal = *reinterpret_cast<UINT16*>(instrPtr); } else { instrVal = *reinterpret_cast<UINT32*>(instrPtr); } #else // x64 and x86 instrVal = *instrPtr; #endif return IsGcCoverageInterruptInstructionVal(instrVal); } bool IsOriginalInstruction(PBYTE instrPtr, GCCoverageInfo* gcCover, DWORD offset) { #if defined(TARGET_ARM64) UINT32 instrVal = *reinterpret_cast<UINT32*>(instrPtr); UINT32 origInstrVal = *reinterpret_cast<UINT32*>(gcCover->savedCode + offset); return (instrVal == origInstrVal); #elif defined(TARGET_ARM) size_t instrLen = GetARMInstructionLength(instrPtr); if (instrLen == 2) { UINT16 instrVal = *reinterpret_cast<UINT16*>(instrPtr); UINT16 origInstrVal = *reinterpret_cast<UINT16*>(gcCover->savedCode + offset); return (instrVal == origInstrVal); } else { _ASSERTE(instrLen == 4); UINT32 instrVal = *reinterpret_cast<UINT32*>(instrPtr); UINT32 origInstrVal = *reinterpret_cast<UINT32*>(gcCover->savedCode + offset); return (instrVal == origInstrVal); } #else // x64 and x86 UINT8 instrVal = *reinterpret_cast<UINT8*>(instrPtr); UINT8 origInstrVal = gcCover->savedCode[offset]; return (instrVal == origInstrVal); #endif } void SetupAndSprinkleBreakpoints( NativeCodeVersion nativeCodeVersion, EECodeInfo * pCodeInfo, IJitManager::MethodRegionInfo methodRegionInfo, BOOL fZapped ) { _ASSERTE(!nativeCodeVersion.IsNull()); // Allocate room for the GCCoverageInfo and copy of the method instructions MethodDesc *pMD = nativeCodeVersion.GetMethodDesc(); size_t memSize = sizeof(GCCoverageInfo) + methodRegionInfo.hotSize + methodRegionInfo.coldSize; GCCoverageInfo* gcCover = (GCCoverageInfo*)(void*) pMD->GetLoaderAllocator()->GetHighFrequencyHeap()->AllocAlignedMem(memSize, CODE_SIZE_ALIGN); memset(gcCover, 0, sizeof(GCCoverageInfo)); gcCover->methodRegion = methodRegionInfo; gcCover->codeMan = pCodeInfo->GetCodeManager(); gcCover->gcInfoToken = pCodeInfo->GetGCInfoToken(); gcCover->callerThread = 0; gcCover->doingEpilogChecks = true; gcCover->SprinkleBreakpoints(gcCover->savedCode, gcCover->methodRegion.hotStartAddress, gcCover->methodRegion.hotSize, 0, fZapped); // This is not required for ARM* as the above call does the work for both hot & cold regions #if !defined(TARGET_ARM) && !defined(TARGET_ARM64) if (gcCover->methodRegion.coldSize != 0) { gcCover->SprinkleBreakpoints(gcCover->savedCode + gcCover->methodRegion.hotSize, gcCover->methodRegion.coldStartAddress, gcCover->methodRegion.coldSize, gcCover->methodRegion.hotSize, fZapped); } #endif nativeCodeVersion.SetGCCoverageInfo(gcCover); } void SetupAndSprinkleBreakpointsForJittedMethod(NativeCodeVersion nativeCodeVersion, PCODE codeStart ) { _ASSERTE(!nativeCodeVersion.IsNull()); EECodeInfo codeInfo(codeStart); _ASSERTE(codeInfo.IsValid()); _ASSERTE(codeInfo.GetRelOffset() == 0); IJitManager::MethodRegionInfo methodRegionInfo; codeInfo.GetMethodRegionInfo(&methodRegionInfo); _ASSERTE(PCODEToPINSTR(codeStart) == methodRegionInfo.hotStartAddress); #ifdef _DEBUG if (!g_pConfig->SkipGCCoverage(nativeCodeVersion.GetMethodDesc()->GetModule()->GetSimpleName())) #endif SetupAndSprinkleBreakpoints(nativeCodeVersion, &codeInfo, methodRegionInfo, FALSE ); } /****************************************************************************/ 
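// Note on the layout relied upon above (an assumption inferred from the
// allocation in SetupAndSprinkleBreakpoints, not from a header in this file):
// the GCCoverageInfo is allocated with
// memSize = sizeof(GCCoverageInfo) + hotSize + coldSize, so savedCode acts as
// a variable-length buffer placed directly after the struct:
//
//   [ GCCoverageInfo fields | hot-region copy (hotSize bytes) | cold-region copy (coldSize bytes) ]
//
// which is why the cold region's saved bytes are addressed as
// savedCode + methodRegion.hotSize elsewhere in this file.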
/* called when a method is first jitted when GCStress level 4 or 8 is on */ void SetupGcCoverage(NativeCodeVersion nativeCodeVersion, BYTE* methodStartPtr) { _ASSERTE(!nativeCodeVersion.IsNull()); #ifdef _DEBUG if (!g_pConfig->ShouldGcCoverageOnMethod(nativeCodeVersion.GetMethodDesc()->m_pszDebugMethodName)) { return; } #endif // Ideally we would assert here that m_GcCover is NULL. // // However, we can't do that (at least not yet), because we may // invoke this method more than once on a given // MethodDesc. Examples include prejitted methods and rejitted // methods. // // In the prejit case, we can't safely re-instrument an already // instrumented method. By bailing out here, we will use the // original instrumentation, which should still be valid as // the method code has not changed. // // In the rejit case, the old method code may still be active and // instrumented, so we need to preserve that gc cover info. By // bailing out here we will skip instrumenting the rejitted native // code, and since the rejitted method does not get instrumented // we should be able to tolerate that the gc cover info does not // match. if (nativeCodeVersion.GetGCCoverageInfo() != NULL) { return; } PCODE codeStart = (PCODE) methodStartPtr; SetupAndSprinkleBreakpointsForJittedMethod(nativeCodeVersion, codeStart); } void ReplaceInstrAfterCall(PBYTE instrToReplace, MethodDesc* callMD) { ReturnKind returnKind = callMD->GetReturnKind(true); if (!IsValidReturnKind(returnKind)) { #if defined(TARGET_AMD64) && defined(TARGET_UNIX) _ASSERTE(!"Unexpected return kind for x64 Unix."); #else // SKip GC coverage after the call. return; #endif } _ASSERTE(IsValidReturnKind(returnKind)); bool ispointerKind = IsPointerReturnKind(returnKind); #ifdef TARGET_ARM size_t instrLen = GetARMInstructionLength(instrToReplace); bool protectReturn = ispointerKind; if (protectReturn) if (instrLen == 2) *(WORD*)instrToReplace = INTERRUPT_INSTR_PROTECT_RET; else *(DWORD*)instrToReplace = INTERRUPT_INSTR_PROTECT_RET_32; else if (instrLen == 2) *(WORD*)instrToReplace = INTERRUPT_INSTR; else *(DWORD*)instrToReplace = INTERRUPT_INSTR_32; #elif defined(TARGET_ARM64) bool protectReturn = ispointerKind; if (protectReturn) *(DWORD*)instrToReplace = INTERRUPT_INSTR_PROTECT_RET; else *(DWORD*)instrToReplace = INTERRUPT_INSTR; #elif defined(TARGET_AMD64) || defined(TARGET_X86) if (ispointerKind) { bool protectRegister[2] = { false, false }; bool moreRegisters = false; ReturnKind fieldKind1 = ExtractRegReturnKind(returnKind, 0, moreRegisters); if (IsPointerFieldReturnKind(fieldKind1)) { protectRegister[0] = true; } if (moreRegisters) { ReturnKind fieldKind2 = ExtractRegReturnKind(returnKind, 1, moreRegisters); if (IsPointerFieldReturnKind(fieldKind2)) { protectRegister[1] = true; } } _ASSERTE(!moreRegisters); if (protectRegister[0] && !protectRegister[1]) { *instrToReplace = INTERRUPT_INSTR_PROTECT_FIRST_RET; } else { #if !defined(TARGET_AMD64) || !defined(TARGET_UNIX) _ASSERTE(!"Not expected multi reg return with pointers."); #endif // !TARGET_AMD64 || !TARGET_UNIX if (!protectRegister[0] && protectRegister[1]) { *instrToReplace = INTERRUPT_INSTR_PROTECT_SECOND_RET; } else { _ASSERTE(protectRegister[0] && protectRegister[1]); *instrToReplace = INTERRUPT_INSTR_PROTECT_BOTH_RET; } } } else { *instrToReplace = INTERRUPT_INSTR; } #else _ASSERTE(!"not implemented for platform"); #endif } #ifdef TARGET_AMD64 class GCCoverageRangeEnumerator { private: ICodeManager *m_pCodeManager; GCInfoToken m_pvGCTable; BYTE *m_codeStart; BYTE *m_codeEnd; BYTE 
*m_curFuncletEnd; BYTE *m_nextFunclet; BYTE* GetNextFunclet () { if (m_nextFunclet == NULL) return m_codeEnd; BYTE *pCurFunclet = (BYTE*)EECodeInfo::findNextFunclet(m_nextFunclet, m_codeEnd - m_nextFunclet, (LPVOID*)&m_curFuncletEnd); m_nextFunclet = (pCurFunclet != NULL) ? m_curFuncletEnd : NULL; if (pCurFunclet == NULL) return m_codeEnd; LOG((LF_JIT, LL_INFO1000, "funclet range %p-%p\n", pCurFunclet, m_curFuncletEnd)); // // workaround - adjust the funclet end address to exclude uninterruptible // code at the end of each funclet. The jit currently puts data like // jump tables in the code portion of the allocation, instead of the // read-only portion. // // TODO: If the entire range is uninterruptible, we should skip the // entire funclet. // unsigned ofsLastInterruptible = m_pCodeManager->FindEndOfLastInterruptibleRegion( static_cast<unsigned int>(pCurFunclet - m_codeStart), static_cast<unsigned int>(m_curFuncletEnd - m_codeStart), m_pvGCTable); if (ofsLastInterruptible) { m_curFuncletEnd = m_codeStart + ofsLastInterruptible; LOG((LF_JIT, LL_INFO1000, "adjusted end to %p\n", m_curFuncletEnd)); } return pCurFunclet; } public: GCCoverageRangeEnumerator (ICodeManager *pCodeManager, GCInfoToken pvGCTable, BYTE *codeStart, SIZE_T codeSize) { m_pCodeManager = pCodeManager; m_pvGCTable = pvGCTable; m_codeStart = codeStart; m_codeEnd = codeStart + codeSize; m_nextFunclet = codeStart; GetNextFunclet(); } // Checks that the given pointer is inside of a range where gc should be // tested. If not, increments the pointer until it is, and returns the // new pointer. BYTE *EnsureInRange (BYTE *cur) { if (cur >= m_curFuncletEnd) { cur = GetNextFunclet(); } return cur; } BYTE *SkipToNextRange () { return GetNextFunclet(); } }; #endif // TARGET_AMD64 /****************************************************************************/ /* sprinkle interrupt instructions that will stop on every GCSafe location regionOffsetAdj - Represents the offset of the current region from the beginning of the method (is 0 for hot region) */ void GCCoverageInfo::SprinkleBreakpoints( BYTE * saveAddr, PCODE pCode, size_t codeSize, size_t regionOffsetAdj, BOOL fZapped) { #if (defined(TARGET_X86) || defined(TARGET_AMD64)) && USE_DISASSEMBLER BYTE * codeStart = (BYTE *)pCode; ExecutableWriterHolder<BYTE> codeWriterHolder; size_t writeableOffset; memcpy(saveAddr, codeStart, codeSize); // For prejitted code we have to remove the write-protect on the code page if (fZapped) { DWORD oldProtect; ClrVirtualProtect(codeStart, codeSize, PAGE_EXECUTE_READWRITE, &oldProtect); writeableOffset = 0; } else { codeWriterHolder = ExecutableWriterHolder<BYTE>(codeStart, codeSize); writeableOffset = codeWriterHolder.GetRW() - codeStart; } PBYTE cur; BYTE* codeEnd = codeStart + codeSize; EECodeInfo codeInfo((PCODE)codeStart); static ConfigDWORD fGcStressOnDirectCalls; // ConfigDWORD must be a static variable #ifdef TARGET_AMD64 GCCoverageRangeEnumerator rangeEnum(codeMan, gcInfoToken, codeStart, codeSize); GcInfoDecoder safePointDecoder(gcInfoToken, (GcInfoDecoderFlags)0, 0); bool fSawPossibleSwitch = false; #endif cur = codeStart; Disassembler disassembler; // When we find a direct call instruction and we are partially-interruptible // we determine the target and place a breakpoint after the call // to simulate the hijack // However, we need to wait until we disassemble the instruction // after the call in order to put the breakpoint or we'll mess up // the disassembly // This variable is non-null if the previous instruction was a direct call, // and 
we have found it's target MethodDesc MethodDesc* prevDirectCallTargetMD = NULL; /* TODO. Simulating the hijack could cause problems in cases where the return register is not always a valid GC ref on the return offset. That could happen if we got to the return offset via a branch and not via return from the preceding call. However, this has not been an issue so far. Example: mov eax, someval test eax, eax jCC AFTERCALL call MethodWhichReturnsGCobject // return value is not used AFTERCALL: */ while (cur < codeEnd) { _ASSERTE(*cur != INTERRUPT_INSTR && *cur != INTERRUPT_INSTR_CALL); MethodDesc* targetMD = NULL; InstructionType instructionType; size_t len = disassembler.DisassembleInstruction(cur, codeEnd - cur, &instructionType); #ifdef TARGET_AMD64 // REVISIT_TODO apparently the jit does not use the entire RUNTIME_FUNCTION range // for code. It uses some for switch tables. Because the first few offsets // may be decodable as instructions, we can't reason about where we should // encounter invalid instructions. However, we do not want to silently skip // large chunks of methods just because the JIT started emitting a new // instruction, so only assume it is a switch table if we've seen the switch // code (an indirect unconditional jump) if ((len == 0) && fSawPossibleSwitch) { LOG((LF_JIT, LL_WARNING, "invalid instruction at %p (possibly start of switch table)\n", cur)); cur = rangeEnum.SkipToNextRange(); prevDirectCallTargetMD = NULL; fSawPossibleSwitch = false; continue; } #endif _ASSERTE(len > 0); _ASSERTE(len <= (size_t)(codeEnd-cur)); switch(instructionType) { case InstructionType::Call_IndirectUnconditional: #ifdef TARGET_AMD64 if(safePointDecoder.IsSafePoint((UINT32)(cur + len - codeStart + regionOffsetAdj))) #endif { *(cur + writeableOffset) = INTERRUPT_INSTR_CALL; // return value. May need to protect } break; case InstructionType::Call_DirectUnconditional: if(fGcStressOnDirectCalls.val(CLRConfig::INTERNAL_GcStressOnDirectCalls)) { #ifdef TARGET_AMD64 if(safePointDecoder.IsSafePoint((UINT32)(cur + len - codeStart + regionOffsetAdj))) #endif { PBYTE nextInstr; PBYTE target = getTargetOfCall(cur, NULL, &nextInstr); if (target != 0) { targetMD = getTargetMethodDesc((PCODE)target); } } } break; #ifdef TARGET_AMD64 case InstructionType::Branch_IndirectUnconditional: fSawPossibleSwitch = true; break; #endif default: // Clang issues an error saying that some enum values are not handled in the switch, that's intended break; } if (prevDirectCallTargetMD != 0) { ReplaceInstrAfterCall(cur + writeableOffset, prevDirectCallTargetMD); } // For fully interruptible code, we end up whacking every instruction // to INTERRUPT_INSTR. For non-fully interruptible code, we end // up only touching the call instructions (specially so that we // can really do the GC on the instruction just after the call). size_t dwRelOffset = (cur - codeStart) + regionOffsetAdj; _ASSERTE(FitsIn<DWORD>(dwRelOffset)); if (codeMan->IsGcSafe(&codeInfo, static_cast<DWORD>(dwRelOffset))) { *(cur + writeableOffset) = INTERRUPT_INSTR; } #ifdef TARGET_X86 // we will whack every instruction in the prolog and epilog to make certain // our unwinding logic works there. 
if (codeMan->IsInPrologOrEpilog((cur - codeStart) + (DWORD)regionOffsetAdj, gcInfoToken, NULL)) { *(cur + writeableOffset) = INTERRUPT_INSTR; } #endif // If we couldn't find the method desc targetMD is zero prevDirectCallTargetMD = targetMD; cur += len; #ifdef TARGET_AMD64 PBYTE newCur = rangeEnum.EnsureInRange(cur); if(newCur != cur) { prevDirectCallTargetMD = NULL; cur = newCur; fSawPossibleSwitch = false; } #endif } // If we are not able to place an interrupt at the first instruction, this means that // we are partially interruptible with no prolog. Just don't bother to do the // the epilog checks, since the epilog will be trivial (a single return instr) assert(codeSize > 0); if ((regionOffsetAdj==0) && (*codeStart != INTERRUPT_INSTR)) doingEpilogChecks = false; #elif defined(TARGET_ARM) || defined(TARGET_ARM64) //Save the method code from hotRegion memcpy(saveAddr, (BYTE*)methodRegion.hotStartAddress, methodRegion.hotSize); if (methodRegion.coldSize > 0) { //Save the method code from coldRegion memcpy(saveAddr+methodRegion.hotSize, (BYTE*)methodRegion.coldStartAddress, methodRegion.coldSize); } // For prejitted code we have to remove the write-protect on the code page if (fZapped) { DWORD oldProtect; ClrVirtualProtect((BYTE*)methodRegion.hotStartAddress, methodRegion.hotSize, PAGE_EXECUTE_READWRITE, &oldProtect); if (methodRegion.coldSize > 0) { ClrVirtualProtect((BYTE*)methodRegion.coldStartAddress, methodRegion.coldSize, PAGE_EXECUTE_READWRITE, &oldProtect); } } GcInfoDecoder safePointDecoder(gcInfoToken, (GcInfoDecoderFlags)0, 0); assert(methodRegion.hotSize > 0); #ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED safePointDecoder.EnumerateSafePoints(&replaceSafePointInstructionWithGcStressInstr,this); #endif // PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED safePointDecoder.EnumerateInterruptibleRanges(&replaceInterruptibleRangesWithGcStressInstr, this); FlushInstructionCache(GetCurrentProcess(), (BYTE*)methodRegion.hotStartAddress, methodRegion.hotSize); if (methodRegion.coldSize > 0) { FlushInstructionCache(GetCurrentProcess(), (BYTE*)methodRegion.coldStartAddress, methodRegion.coldSize); } #else _ASSERTE(!"not implemented for platform"); #endif // TARGET_X86 } #if defined(TARGET_ARM) || defined(TARGET_ARM64) #ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED void replaceSafePointInstructionWithGcStressInstr(UINT32 safePointOffset, LPVOID pGCCover) { PCODE pCode = NULL; IJitManager::MethodRegionInfo *ptr = &(((GCCoverageInfo*)pGCCover)->methodRegion); //Get code address from offset if (safePointOffset < ptr->hotSize) pCode = ptr->hotStartAddress + safePointOffset; else if(safePointOffset - ptr->hotSize < ptr->coldSize) { SIZE_T coldOffset = safePointOffset - ptr->hotSize; pCode = ptr->coldStartAddress + coldOffset; } else { //For some methods( eg MCCTest.MyClass.GetSum2 in test file jit\jit64\mcc\interop\mcc_i07.il) gcinfo points to a safepoint //beyond the length of the method. So commenting the below assert. //_ASSERTE(safePointOffset - ptr->hotSize < ptr->coldSize); return; } PBYTE instrPtr = (BYTE*)PCODEToPINSTR(pCode); // For code sequences of the type // BL func1 // BL func2 // Safe point 1 // mov r1 r0 // Safe point 2 // Both the above safe points instruction must be replaced with gcStress instruction. // However as the first safe point is already replaced with gcstress instruction, decoding of the call // instruction will fail when processing for the 2nd safe point. Therefore saved instruction must be used instead of // instrPtr for decoding the call instruction. 
PBYTE savedInstrPtr = ((GCCoverageInfo*)pGCCover)->savedCode + safePointOffset; //Determine if instruction before the safe point is call using immediate (BLX Imm) or call by register (BLX Rm) BOOL instructionIsACallThroughRegister = FALSE; BOOL instructionIsACallThroughImmediate = FALSE; #if defined(TARGET_ARM) // POSSIBLE BUG: Note that we are looking backwards by 2 or 4 bytes, looking for particular call instruction encodings. // However, we don't know if the previous instruction is 2 bytes or 4 bytes. Looking back 2 bytes could be looking into // the middle of a 4-byte instruction. The only safe way to do this is by walking forward from the first instruction of // the function. // call by register instruction is two bytes (BL<c> Reg T1 encoding) WORD instr = *((WORD*)savedInstrPtr - 1); instr = instr & 0xff87; if ((instr ^ 0x4780) == 0) { // It is call by register instructionIsACallThroughRegister = TRUE; } else { // call using immediate instructions are 4 bytes (BL<c> <label> T1 encoding) instr = *((WORD*)savedInstrPtr - 2); instr = instr & 0xf800; if ((instr ^ 0xf000) == 0) { if ((*(((WORD*)savedInstrPtr) - 1) & 0xd000) == 0xd000) { // It is call by immediate instructionIsACallThroughImmediate = TRUE; } } } #elif defined(TARGET_ARM64) DWORD instr = *((DWORD*)savedInstrPtr - 1); // Is the call through a register or an immediate offset // BL // Encoding: 0x94000000 & [imm26] if ((instr & 0xFC000000) == 0x94000000) { instructionIsACallThroughImmediate = TRUE; } // BLR // Encoding: 0xD63F0000 & (Rn<<5) else if ((instr & 0xFFFFFC1F) == 0xD63F0000) { instructionIsACallThroughRegister = TRUE; } #endif // _TARGET_XXXX_ // safe point must always be after a call instruction // and cannot be both call by register & immediate // The safe points are also marked at jump calls( a special variant of // tail call). However that call site will never appear on the stack. // So commenting the assert for now. As for such places the previous // instruction will not be a call instruction. //_ASSERTE(instructionIsACallThroughRegister ^ instructionIsACallThroughImmediate); #if defined(TARGET_ARM) size_t instrLen = sizeof(WORD); #else size_t instrLen = sizeof(DWORD); #endif ExecutableWriterHolder<BYTE> instrPtrWriterHolder(instrPtr - instrLen, 2 * instrLen); if(instructionIsACallThroughRegister) { // If it is call by register then cannot know MethodDesc so replace the call instruction with illegal instruction // safe point will be replaced with appropriate illegal instruction at execution time when reg value is known #if defined(TARGET_ARM) *((WORD*)instrPtrWriterHolder.GetRW()) = INTERRUPT_INSTR_CALL; #elif defined(TARGET_ARM64) *((DWORD*)instrPtrWriterHolder.GetRW()) = INTERRUPT_INSTR_CALL; #endif // _TARGET_XXXX_ } else if(instructionIsACallThroughImmediate) { // If it is call by immediate then find the methodDesc PBYTE nextInstr; PBYTE target = getTargetOfCall((PBYTE)((WORD*)savedInstrPtr-2), NULL, &nextInstr); if (target != 0) { //Target is calculated wrt the saved instruction pointer //Find the real target wrt the real instruction pointer int delta = static_cast<int>(target - savedInstrPtr); target = delta + instrPtr; MethodDesc* targetMD = getTargetMethodDesc((PCODE)target); if (targetMD != 0) { // The instruction about to be replaced cannot already be a gcstress instruction _ASSERTE(!IsGcCoverageInterruptInstruction(instrPtr)); // // When applying GC coverage breakpoints at native image load time, the code here runs // before eager fixups are applied for the module being loaded. 
The direct call target // never requires restore, however it is possible that it is initially in an invalid state // and remains invalid until one or more eager fixups are applied. // // ReplaceInstrAfterCall consults the method signature, meaning it consults the // metadata in the owning module. For generic instantiations stored in non-preferred // modules, reaching the owning module requires following the module override pointer for // the enclosing MethodTable. In this case, the module override pointer is generally // invalid until an associated eager fixup is applied. // // In situations like this, ReplaceInstrAfterCall will try to dereference an // unresolved fixup and will AV. // // Given all of this, skip the ReplaceInstrAfterCall call by default to avoid // unexpected AVs. This implies leaving out the GC coverage breakpoints for direct calls // unless COMPlus_GcStressOnDirectCalls=1 is explicitly set in the environment. // static ConfigDWORD fGcStressOnDirectCalls; if (fGcStressOnDirectCalls.val(CLRConfig::INTERNAL_GcStressOnDirectCalls)) { ReplaceInstrAfterCall(instrPtrWriterHolder.GetRW() + instrLen, targetMD); } } } } } #endif // PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED //Replaces the provided interruptible range with corresponding 2 or 4 byte gcStress illegal instruction bool replaceInterruptibleRangesWithGcStressInstr (UINT32 startOffset, UINT32 stopOffset, LPVOID pGCCover) { PCODE pCode = NULL; PBYTE rangeStart = NULL; PBYTE rangeStop = NULL; //Interruptible range can span across hot & cold region int acrossHotRegion = 1; // 1 means range is not across end of hot region & 2 is when it is across end of hot region //Find the code addresses from offsets IJitManager::MethodRegionInfo *ptr = &(((GCCoverageInfo*)pGCCover)->methodRegion); if (startOffset < ptr->hotSize) { pCode = ptr->hotStartAddress + startOffset; rangeStart = (BYTE*)PCODEToPINSTR(pCode); if(stopOffset <= ptr->hotSize) { pCode = ptr->hotStartAddress + stopOffset; rangeStop = (BYTE*)PCODEToPINSTR(pCode); } else { //Interruptible range is spanning across hot & cold region pCode = ptr->hotStartAddress + ptr->hotSize; rangeStop = (BYTE*)PCODEToPINSTR(pCode); acrossHotRegion++; } } else { SIZE_T coldOffset = startOffset - ptr->hotSize; _ASSERTE(coldOffset < ptr->coldSize); pCode = ptr->coldStartAddress + coldOffset; rangeStart = (BYTE*)PCODEToPINSTR(pCode); coldOffset = stopOffset - ptr->hotSize; _ASSERTE(coldOffset <= ptr->coldSize); pCode = ptr->coldStartAddress + coldOffset; rangeStop = (BYTE*)PCODEToPINSTR(pCode); } // Need to do two iterations if interruptible range spans across hot & cold region while(acrossHotRegion--) { ExecutableWriterHolder<BYTE> instrPtrWriterHolder(rangeStart, rangeStop - rangeStart); PBYTE instrPtrRW = instrPtrWriterHolder.GetRW(); PBYTE rangeStopRW = instrPtrRW + (rangeStop - rangeStart); while(instrPtrRW < rangeStopRW) { // The instruction about to be replaced cannot already be a gcstress instruction _ASSERTE(!IsGcCoverageInterruptInstruction(instrPtrRW)); #if defined(TARGET_ARM) size_t instrLen = GetARMInstructionLength(instrPtrRW); if (instrLen == 2) *((WORD*)instrPtrRW) = INTERRUPT_INSTR; else { *((DWORD*)instrPtrRW) = INTERRUPT_INSTR_32; } instrPtrRW += instrLen; #elif defined(TARGET_ARM64) *((DWORD*)instrPtrRW) = INTERRUPT_INSTR; instrPtrRW += 4; #endif // TARGET_XXXX_ } if(acrossHotRegion) { //Set rangeStart & rangeStop for the second iteration _ASSERTE(acrossHotRegion==1); rangeStart = (BYTE*)PCODEToPINSTR(ptr->coldStartAddress); pCode = ptr->coldStartAddress + stopOffset - 
ptr->hotSize; rangeStop = (BYTE*)PCODEToPINSTR(pCode); } } return FALSE; } #endif // defined(TARGET_ARM) || defined(TARGET_ARM64) static size_t getRegVal(unsigned regNum, PCONTEXT regs) { return *getRegAddr(regNum, regs); } /****************************************************************************/ static PBYTE getTargetOfCall(PBYTE instrPtr, PCONTEXT regs, PBYTE* nextInstr) { BYTE sibindexadj = 0; BYTE baseadj = 0; WORD displace = 0; // In certain situations, the instruction bytes are read from a different // location than the actual bytes being executed. // When decoding the instructions of a method which is sprinkled with // TRAP instructions for GCStress, we decode the bytes from a copy // of the instructions stored before the traps-for-gc were inserted. // However, the PC-relative addressing/displacement of the CALL-target // will still be with respect to the currently executing PC. // So, if a register context is available, we pick the PC from it // (for address calculation purposes only). PBYTE PC = (regs) ? (PBYTE)GetIP(regs) : instrPtr; #ifdef TARGET_ARM if((instrPtr[1] & 0xf0) == 0xf0) // direct call { int imm32 = GetThumb2BlRel24((UINT16 *)instrPtr); *nextInstr = instrPtr + 4; return PC + 4 + imm32; } else if(((instrPtr[1] & 0x47) == 0x47) & ((instrPtr[0] & 0x80) == 0x80)) // indirect call { *nextInstr = instrPtr + 2; unsigned int regnum = (instrPtr[0] & 0x78) >> 3; return (BYTE *)getRegVal(regnum, regs); } else { return 0; // Not a call. } #elif defined(TARGET_ARM64) if (((*reinterpret_cast<DWORD*>(instrPtr)) & 0xFC000000) == 0x94000000) { // call through immediate int imm26 = ((*((DWORD*)instrPtr)) & 0x03FFFFFF)<<2; // SignExtend the immediate value. imm26 = (imm26 << 4) >> 4; *nextInstr = instrPtr + 4; return PC + imm26; } else if (((*reinterpret_cast<DWORD*>(instrPtr)) & 0xFFFFC1F) == 0xD63F0000) { // call through register *nextInstr = instrPtr + 4; unsigned int regnum = ((*(DWORD*)instrPtr) >> 5) & 0x1F; return (BYTE *)getRegVal(regnum, regs); } else { return 0; // Fail } #endif #ifdef TARGET_AMD64 if ((instrPtr[0] & 0xf0) == REX_PREFIX_BASE) { static_assert_no_msg(REX_SIB_BASE_EXT == REX_MODRM_RM_EXT); if (instrPtr[0] & REX_SIB_BASE_EXT) baseadj = 8; if (instrPtr[0] & REX_SIB_INDEX_EXT) sibindexadj = 8; instrPtr++; } #endif // TARGET_AMD64 if (instrPtr[0] == 0xE8) { // Direct Relative Near *nextInstr = instrPtr + 5; size_t base = (size_t) PC + 5; INT32 displacement = (INT32) ( ((UINT32)instrPtr[1]) + (((UINT32)instrPtr[2]) << 8) + (((UINT32)instrPtr[3]) << 16) + (((UINT32)instrPtr[4]) << 24) ); // Note that the signed displacement is sign-extended // to 64-bit on AMD64 return((PBYTE)(base + (SSIZE_T)displacement)); } if (instrPtr[0] == 0xFF) { // Indirect Absolute Near _ASSERTE(regs); BYTE mod = (instrPtr[1] & 0xC0) >> 6; BYTE rm = (instrPtr[1] & 0x7); PBYTE result; switch (mod) { case 0: case 1: case 2: if (rm == 4) { // // Get values from the SIB byte // BYTE ss = (instrPtr[2] & 0xC0) >> 6; BYTE index = (instrPtr[2] & 0x38) >> 3; BYTE base = (instrPtr[2] & 0x7); // // Get starting value // if ((mod == 0) && (base == 5)) { result = 0; } else { result = (BYTE *)getRegVal(baseadj + base, regs); } // // Add in the [index] // if (index != 0x4) { result = result + (getRegVal(sibindexadj + index, regs) << ss); } // // Finally add in the offset // if (mod == 0) { if (base == 5) { result = result + *((int *)&instrPtr[3]); displace += 7; } else { displace += 3; } } else if (mod == 1) { result = result + *((char *)&instrPtr[3]); displace += 4; } else { // == 2 result = result + 
*((int *)&instrPtr[3]); displace += 7; } } else { // // Get the value we need from the register. // if ((mod == 0) && (rm == 5)) { #ifdef TARGET_AMD64 // at this point instrPtr should be pointing at the beginning // of the byte sequence for the call instruction. the operand // is a RIP-relative address from the next instruction, so to // calculate the address of the next instruction we need to // jump forward 6 bytes: 1 for the opcode, 1 for the ModRM byte, // and 4 for the operand. see AMD64 Programmer's Manual Vol 3. result = PC + 6; #else result = 0; #endif // TARGET_AMD64 } else { result = (PBYTE)getRegVal(baseadj + rm, regs); } if (mod == 0) { if (rm == 5) { result = result + *((int *)&instrPtr[2]); displace += 6; } else { displace += 2; } } else if (mod == 1) { result = result + *((char *)&instrPtr[2]); displace += 3; } else { // == 2 result = result + *((int *)&instrPtr[2]); displace += 6; } } // // Now dereference thru the result to get the resulting IP. // result = (PBYTE)(*((PBYTE *)result)); break; case 3: default: result = (PBYTE)getRegVal(baseadj + rm, regs); displace += 2; break; } *nextInstr = instrPtr + displace; return result; } return(0); // Fail } /****************************************************************************/ #ifdef TARGET_X86 void checkAndUpdateReg(DWORD& origVal, DWORD curVal, bool gcHappened) { if (origVal == curVal) return; // If these asserts go off, they indicate either that unwinding out of a epilog is wrong or that // the validation infrastructure has got a bug. _ASSERTE(gcHappened); // If the register values are different, a GC must have happened _ASSERTE(GCHeapUtilities::GetGCHeap()->IsHeapPointer((BYTE*) size_t(origVal))); // And the pointers involved are on the GCHeap _ASSERTE(GCHeapUtilities::GetGCHeap()->IsHeapPointer((BYTE*) size_t(curVal))); origVal = curVal; // this is now the best estimate of what should be returned. } #endif // TARGET_X86 int GCcoverCount = 0; void* forceStack[8]; /****************************************************************************/ bool IsGcCoverageInterrupt(LPVOID ip) { // Determine if the IP is valid for a GC marker first, before trying to dereference it to check the instruction EECodeInfo codeInfo(reinterpret_cast<PCODE>(ip)); if (!codeInfo.IsValid()) { return false; } NativeCodeVersion nativeCodeVersion = codeInfo.GetNativeCodeVersion(); _ASSERTE(!nativeCodeVersion.IsNull()); GCCoverageInfo *gcCover = nativeCodeVersion.GetGCCoverageInfo(); if (gcCover == nullptr) { return false; } PBYTE instrPtr = reinterpret_cast<PBYTE>(ip); if (IsGcCoverageInterruptInstruction(instrPtr)) { return true; } if (IsOriginalInstruction(instrPtr, gcCover, codeInfo.GetRelOffset())) { // Another thread may have already changed the code back to the original. return true; } return false; } // Remove the GcCoverage interrupt instruction, and restore the // original instruction. Only one instruction must be used, // because multiple threads can be executing the same code stream. 
void RemoveGcCoverageInterrupt(TADDR instrPtr, BYTE * savedInstrPtr, GCCoverageInfo* gcCover, DWORD offset) { ExecutableWriterHolder<void> instrPtrWriterHolder((void*)instrPtr, 4); #ifdef TARGET_ARM if (GetARMInstructionLength(savedInstrPtr) == 2) *(WORD *)instrPtrWriterHolder.GetRW() = *(WORD *)savedInstrPtr; else *(DWORD *)instrPtrWriterHolder.GetRW() = *(DWORD *)savedInstrPtr; #elif defined(TARGET_ARM64) *(DWORD *)instrPtrWriterHolder.GetRW() = *(DWORD *)savedInstrPtr; #else *(BYTE *)instrPtrWriterHolder.GetRW() = *savedInstrPtr; #endif #ifdef TARGET_X86 // Epilog checking relies on precise control of when instrumentation for the first prolog // instruction is enabled or disabled. In particular, if a function has multiple epilogs, or // the first execution of the function terminates via an exception, and subsequent completions // do not, then the function may trigger a false stress fault if epilog checks are not disabled. if (offset == 0) { gcCover->doingEpilogChecks = false; } #endif // TARGET_X86 FlushInstructionCache(GetCurrentProcess(), (LPCVOID)instrPtr, 4); } // A managed thread (T) can race with the GC as follows: // 1) At the first safepoint, we notice that T is in preemptive mode during the call for GCStress // So, it is put it in cooperative mode for the purpose of GCStress(fPreemptiveGcDisabledForGcStress) // 2) We DoGCStress(). Start off background GC in a different thread. // 3) Then the thread T is put back to preemptive mode (because that's where it was). // Thread T continues execution along with the GC thread. // 4) The Jitted code puts thread T to cooperative mode, as part of PInvoke epilog // 5) Now instead of CORINFO_HELP_STOP_FOR_GC(), we hit the GCStress trap and start // another round of GCStress while in Cooperative mode. // 6) Now, thread T can modify the stack (ex: RedirectionFrame setup) while the GC thread is scanning it. // // This race is now mitigated below. Where we won't initiate a stress mode GC // for a thread in cooperative mode with an active ICF, if g_TrapReturningThreads is true. BOOL OnGcCoverageInterrupt(PCONTEXT regs) { // So that you can set counted breakpoint easily; GCcoverCount++; forceStack[0]= &regs; // This is so I can see it fastchecked PCODE controlPc = GetIP(regs); TADDR instrPtr = PCODEToPINSTR(controlPc); forceStack[0] = &instrPtr; // This is so I can see it fastchecked EECodeInfo codeInfo(controlPc); if (!codeInfo.IsValid()) return(FALSE); MethodDesc* pMD = codeInfo.GetMethodDesc(); DWORD offset = codeInfo.GetRelOffset(); forceStack[1] = &pMD; // This is so I can see it fastchecked forceStack[2] = &offset; // This is so I can see it fastchecked NativeCodeVersion nativeCodeVersion = codeInfo.GetNativeCodeVersion(); _ASSERTE(!nativeCodeVersion.IsNull()); GCCoverageInfo* gcCover = nativeCodeVersion.GetGCCoverageInfo(); forceStack[3] = &gcCover; // This is so I can see it fastchecked if (gcCover == 0) return(FALSE); // we aren't doing code gcCoverage on this function BYTE * savedInstrPtr = &gcCover->savedCode[offset]; Thread* pThread = GetThreadNULLOk(); if (!pThread) { // No thread at the moment so we aren't doing coverage for this function. // This should only occur for methods with the UnmanagedCallersOnlyAttribute, // where the call could be coming from a thread unknown to the CLR and // we haven't created a thread yet - see PreStubWorker_Preemptive(). 
_ASSERTE(pMD->HasUnmanagedCallersOnlyAttribute()); RemoveGcCoverageInterrupt(instrPtr, savedInstrPtr, gcCover, offset); return TRUE; } // If the thread is in preemptive mode then we must be in a // PInvoke stub, a method that has an inline PInvoke frame, // or be in a reverse PInvoke stub that's about to return. // // The PInvoke cases should properly report GC refs if we // trigger GC here. But a reverse PInvoke stub may over-report // leading to spurious failures, as we would not normally report // anything for this method at this point. if (!pThread->PreemptiveGCDisabled() && pMD->HasUnmanagedCallersOnlyAttribute()) { RemoveGcCoverageInterrupt(instrPtr, savedInstrPtr, gcCover, offset); return TRUE; } // If we're in cooperative mode, we're supposed to stop for GC, // and there's an active ICF, don't initiate a stress GC. if (g_TrapReturningThreads && pThread->PreemptiveGCDisabled()) { Frame* pFrame = pThread->GetFrame(); if (InlinedCallFrame::FrameHasActiveCall(pFrame)) { RemoveGcCoverageInterrupt(instrPtr, savedInstrPtr, gcCover, offset); return TRUE; } } #if defined(USE_REDIRECT_FOR_GCSTRESS) && !defined(TARGET_UNIX) // If we're unable to redirect, then we simply won't test GC at this // location. if (!pThread->CheckForAndDoRedirectForGCStress(regs)) { RemoveGcCoverageInterrupt(instrPtr, savedInstrPtr, gcCover, offset); } #else // !USE_REDIRECT_FOR_GCSTRESS #ifdef _DEBUG if (!g_pConfig->SkipGCCoverage(pMD->GetModule()->GetSimpleName())) #endif DoGcStress(regs, codeInfo.GetNativeCodeVersion()); #endif // !USE_REDIRECT_FOR_GCSTRESS return TRUE; } // There are some code paths in DoGcStress that return without doing a GC, but we // now rely on EE suspension to update the GC stress instruction. // We need to do an extra EE suspension/resume even without a GC. FORCEINLINE void UpdateGCStressInstructionWithoutGC () { ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_OTHER); ThreadSuspend::RestartEE(TRUE, TRUE); } /****************************************************************************/ void DoGcStress (PCONTEXT regs, NativeCodeVersion nativeCodeVersion) { PCODE controlPc = GetIP(regs); PBYTE instrPtr = reinterpret_cast<PBYTE>(PCODEToPINSTR(controlPc)); if (nativeCodeVersion.IsNull()) { nativeCodeVersion = ExecutionManager::GetNativeCodeVersion(controlPc); if (nativeCodeVersion.IsNull()) return; } GCCoverageInfo *gcCover = nativeCodeVersion.GetGCCoverageInfo(); EECodeInfo codeInfo(controlPc); _ASSERTE(codeInfo.GetNativeCodeVersion() == nativeCodeVersion); DWORD offset = codeInfo.GetRelOffset(); Thread *pThread = GetThread(); // There is a race condition with the computation of `atCall`. Multiple threads could enter // this function (DoGcStress) at the same time. If one reads `*instrPtr` and sets `atCall` // to `true`, it will proceed, lower down in this function, to call `pThread->CommitGCStressInstructionUpdate()` // to replace the GCStress instruction at the call back to the original call instruction. // Other threads could then read `*instrPtr` and see the actual call instruction instead of the // call-specific GCStress instruction (INTERRUPT_INSTR_CALL[_32]). If `atCall` is set to false as // a result, then we'll do a GCStress as if this is a fully-interruptible code site, which it isn't, // which can lead to asserts (or, presumably, other failures). So, we have to check // `if (!IsGcCoverageInterruptInstruction(instrPtr))` after we read `*instrPtr`. 
bool atCall; bool afterCallProtect[2] = { false, false }; #if defined(TARGET_X86) || defined(TARGET_AMD64) BYTE instrVal = *instrPtr; forceStack[6] = &instrVal; // This is so I can see it fastchecked atCall = (instrVal == INTERRUPT_INSTR_CALL); if (instrVal == INTERRUPT_INSTR_PROTECT_BOTH_RET) { afterCallProtect[0] = afterCallProtect[1] = true; } else if (instrVal == INTERRUPT_INSTR_PROTECT_FIRST_RET) { afterCallProtect[0] = true; } else if (instrVal == INTERRUPT_INSTR_PROTECT_SECOND_RET) { afterCallProtect[1] = true; } #elif defined(TARGET_ARM) forceStack[6] = (WORD*)instrPtr; // This is so I can see it fastchecked size_t instrLen = GetARMInstructionLength(instrPtr); if (instrLen == 2) { WORD instrVal = *(WORD*)instrPtr; atCall = (instrVal == INTERRUPT_INSTR_CALL); afterCallProtect[0] = (instrVal == INTERRUPT_INSTR_PROTECT_RET); } else { _ASSERTE(instrLen == 4); DWORD instrVal32 = *(DWORD*)instrPtr; atCall = (instrVal32 == INTERRUPT_INSTR_CALL_32); afterCallProtect[0] = (instrVal32 == INTERRUPT_INSTR_PROTECT_RET_32); } #elif defined(TARGET_ARM64) DWORD instrVal = *(DWORD *)instrPtr; forceStack[6] = &instrVal; // This is so I can see it fastchecked atCall = (instrVal == INTERRUPT_INSTR_CALL); afterCallProtect[0] = (instrVal == INTERRUPT_INSTR_PROTECT_RET); #endif // _TARGET_* if (!IsGcCoverageInterruptInstruction(instrPtr)) { // This assert can fail if another thread changed original instruction to // GCCoverage Interrupt instruction between these two commands. Uncomment it // when threading issue gets resolved. // _ASSERTE(IsOriginalInstruction(instrPtr, gcCover, offset)); // Someone beat us to it, just go on running. return; } #ifdef TARGET_X86 /* are we at the very first instruction? If so, capture the register state */ bool bShouldUpdateProlog = true; if (gcCover->doingEpilogChecks) { if (offset == 0) { if ((gcCover->callerThread == 0) && (FastInterlockCompareExchangePointer(&gcCover->callerThread, pThread, 0) == 0)) { gcCover->callerRegs = *regs; gcCover->gcCount = GCHeapUtilities::GetGCHeap()->GetGcCount(); bShouldUpdateProlog = false; } else { // We have been in this routine before. Give up on epilog checking because // it is hard to insure that the saved caller register state is correct // This also has the effect of only doing the checking once per routine // (Even if there are multiple epilogs) gcCover->doingEpilogChecks = false; } } // If some other thread removes interrupt points, we abandon epilog testing // for this routine since the barrier at the beginning of the routine may not // be up anymore, and thus the caller context is now not guaranteed to be correct. // This should happen only very rarely so is not a big deal. if (gcCover->callerThread != pThread) gcCover->doingEpilogChecks = false; } instrVal = gcCover->savedCode[offset]; #endif // TARGET_X86 // <GCStress instruction update race> // Remove the interrupt instruction the next time we suspend the EE, // which should happen below in the call to StressHeap(). This is // done with the EE suspended so that we do not race with the executing // code on some other thread. If we allow that race, we may sometimes // get a STATUS_ACCESS_VIOLATION instead of the expected // STATUS_PRIVILEGED_INSTRUCTION because the OS has to inspect the code // stream to determine which exception code to raise. As a result, some // thread may take the exception due to the HLT, but by the time the OS // inspects the code stream, the HLT may be replaced with the original // code and it will just raise a STATUS_ACCESS_VIOLATION. 
#ifdef TARGET_X86 // only restore the original instruction if: // this is not the first instruction in the method's prolog, or // if it is, only if this is the second time we run in this method // note that if this is the second time in the prolog we've already disabled epilog checks if (offset != 0 || bShouldUpdateProlog) #endif pThread->PostGCStressInstructionUpdate((BYTE*)instrPtr, &gcCover->savedCode[offset]); #ifdef TARGET_X86 /* are we in a prolog or epilog? If so just test the unwind logic but don't actually do a GC since the prolog and epilog are not GC safe points */ if (gcCover->codeMan->IsInPrologOrEpilog(offset, gcCover->gcInfoToken, NULL)) { // We are not at a GC safe point so we can't Suspend EE (Suspend EE will yield to GC). // But we still have to update the GC Stress instruction. We do it directly without suspending // other threads, which means a race on updating is still possible. But for X86 the window of // race is so small that we could ignore it. We need a better solution if the race becomes a real problem. // see details about <GCStress instruction update race> in comments above pThread->CommitGCStressInstructionUpdate (); REGDISPLAY regDisp; CONTEXT copyRegs = *regs; pThread->Thread::InitRegDisplay(&regDisp, &copyRegs, true); pThread->UnhijackThread(); CodeManState codeManState; codeManState.dwIsSet = 0; // unwind out of the prolog or epilog gcCover->codeMan->UnwindStackFrame(&regDisp, &codeInfo, UpdateAllRegs, &codeManState, NULL); // Note we always do the unwind, since that does some checking (that we // unwind to a valid return address), but we only do the precise checking when // we are certain we have a good caller state if (gcCover->doingEpilogChecks) { // Confirm that we recovered our register state properly _ASSERTE(regDisp.PCTAddr == TADDR(gcCover->callerRegs.Esp)); // If a GC happened in this function, then the registers will not match // precisely. However, there are still checks we can do. Also we can update // the saved register to its new value so that if a GC does not happen between // instructions we can recover (and since GCs are not allowed in the // prologs and epilogs, we get complete coverage except for the first // instruction in the epilog (TODO: fix it for the first instr case) _ASSERTE(pThread->PreemptiveGCDisabled()); // Epilogs should be in cooperative mode, no GC can happen right now. bool gcHappened = gcCover->gcCount != GCHeapUtilities::GetGCHeap()->GetGcCount(); checkAndUpdateReg(gcCover->callerRegs.Edi, *regDisp.GetEdiLocation(), gcHappened); checkAndUpdateReg(gcCover->callerRegs.Esi, *regDisp.GetEsiLocation(), gcHappened); checkAndUpdateReg(gcCover->callerRegs.Ebx, *regDisp.GetEbxLocation(), gcHappened); checkAndUpdateReg(gcCover->callerRegs.Ebp, *regDisp.GetEbpLocation(), gcHappened); gcCover->gcCount = GCHeapUtilities::GetGCHeap()->GetGcCount(); } return; } #endif // TARGET_X86 #if defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM) || defined(TARGET_ARM64) /* In non-fully interruptible code, the EIP being just after a call instr means something different, because it is expected that we are IN the called method, not actually at the instruction just after the call. This is important, because until the called method returns, IT is responsible for protecting the return value. Thus just after a call instruction we have to protect EAX if the method being called returns a GC pointer. 
To figure this out, we need to stop AT the call so we can determine the target (and thus whether it returns one or more GC pointers), and then place a different interrupt instruction so that the GCCover harness protects the return value register(s) before doing the GC. This effectively simulates a hijack in non-fully interruptible code */ /* TODO. Simulating the hijack could cause problems in cases where the return register is not always a valid GC ref on the return offset. That could happen if we got to the return offset via a branch and not via return from the preceding call. However, this has not been an issue so far. Example: mov eax, someval test eax, eax jCC AFTERCALL call MethodWhichReturnsGCobject // return value is not used AFTERCALL: */ if (atCall) { // We need to update the GC Stress instruction. With partially-interruptible code // the call instruction is not a GC safe point so we can't use // StressHeap or UpdateGCStressInstructionWithoutGC to take care of updating; // So we just update the instruction directly. There are still chances for a race, // but it's not been a problem so far. // see details about <GCStress instruction update race> in comments above pThread->CommitGCStressInstructionUpdate (); PBYTE nextInstr; PBYTE target = getTargetOfCall((BYTE*) instrPtr, regs, (BYTE**)&nextInstr); if (target != 0) { ExecutableWriterHolder<BYTE> nextInstrWriterHolder(nextInstr, sizeof(DWORD)); if (!pThread->PreemptiveGCDisabled()) { // We are in preemptive mode in JITTed code. This implies that we are into IL stub // close to PINVOKE method. This call will never return objectrefs. #ifdef TARGET_ARM size_t instrLen = GetARMInstructionLength(nextInstr); if (instrLen == 2) *(WORD*)nextInstrWriterHolder.GetRW() = INTERRUPT_INSTR; else *(DWORD*)nextInstrWriterHolder.GetRW() = INTERRUPT_INSTR_32; #elif defined(TARGET_ARM64) *(DWORD*)nextInstrWriterHolder.GetRW() = INTERRUPT_INSTR; #else *nextInstrWriterHolder.GetRW() = INTERRUPT_INSTR; #endif } else { MethodDesc* targetMD = getTargetMethodDesc((PCODE)target); if (targetMD != 0) { // @Todo: possible race here, might need to be fixed if it become a problem. // It could become a problem if 64bit does partially interrupt work. // OK, we have the MD, mark the instruction after the CALL // appropriately ReplaceInstrAfterCall(nextInstrWriterHolder.GetRW(), targetMD); } } } // Must flush instruction cache before returning as instruction has been modified. // Note this needs to reach beyond the call by up to 4 bytes. FlushInstructionCache(GetCurrentProcess(), (LPCVOID)instrPtr, 10); // It's not GC safe point, the GC Stress instruction is // already committed and interrupt is already put at next instruction so we just return. return; } #else PORTABILITY_ASSERT("DoGcStress - NYI on this platform"); #endif // _TARGET_* bool enableWhenDone = false; if (!pThread->PreemptiveGCDisabled()) { pThread->DisablePreemptiveGC(); enableWhenDone = true; } #if 0 // TODO currently disabled. we only do a GC once per instruction location. 
/* note that for multiple threads, we can lose track and forget to reset the interrupt after we executed an instruction, so some instruction points will not be executed twice, but we still get very good coverage (perfect for single threaded cases) */ /* if we have not run this instruction in the past */ /* remember to whack it to an INTERRUPT_INSTR again */ if (!gcCover->IsBitSetForOffset(offset)) { // gcCover->curInstr = instrPtr; gcCover->SetBitForOffset(offset); } #endif // 0 #if !defined(USE_REDIRECT_FOR_GCSTRESS) // // If we redirect for gc stress, we don't need this frame on the stack, // the redirection will push a resumable frame. // FrameWithCookie<ResumableFrame> frame(regs); frame.Push(pThread); #endif // USE_REDIRECT_FOR_GCSTRESS DWORD_PTR retValRegs[2] = { 0 }; UINT numberOfRegs = 0; if (afterCallProtect[0]) { #if defined(TARGET_AMD64) retValRegs[numberOfRegs++] = regs->Rax; #elif defined(TARGET_X86) retValRegs[numberOfRegs++] = regs->Eax; #elif defined(TARGET_ARM) retValRegs[numberOfRegs++] = regs->R0; #elif defined(TARGET_ARM64) retValRegs[numberOfRegs++] = regs->X0; #endif // TARGET_ARM64 } if (afterCallProtect[1]) { #if defined(TARGET_AMD64) && defined(TARGET_UNIX) retValRegs[numberOfRegs++] = regs->Rdx; #else // !TARGET_AMD64 || !TARGET_UNIX _ASSERTE(!"Not expected multi reg return with pointers."); #endif // !TARGET_AMD64 || !TARGET_UNIX } _ASSERTE(sizeof(OBJECTREF) == sizeof(DWORD_PTR)); GCFrame gcFrame(pThread, (OBJECTREF*)retValRegs, numberOfRegs, TRUE); MethodDesc *pMD = nativeCodeVersion.GetMethodDesc(); LOG((LF_GCROOTS, LL_EVERYTHING, "GCCOVER: Doing GC at method %s::%s offset 0x%x\n", pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName, offset)); //------------------------------------------------------------------------- // Do the actual stress work // // BUG(github #10318) - when not using allocation contexts, the alloc lock // must be acquired here. Until fixed, this assert prevents random heap corruption. assert(GCHeapUtilities::UseThreadAllocationContexts()); GCHeapUtilities::GetGCHeap()->StressHeap(GetThread()->GetAllocContext()); // StressHeap can exit early w/o forcing a SuspendEE to trigger the instruction update. // We cannot rely on the return code to determine if the instruction update happened. // Use HasPendingGCStressInstructionUpdate() to be certain. if(pThread->HasPendingGCStressInstructionUpdate()) UpdateGCStressInstructionWithoutGC (); // Must flush instruction cache before returning as instruction has been modified. FlushInstructionCache(GetCurrentProcess(), (LPCVOID)instrPtr, 4); CONSISTENCY_CHECK(!pThread->HasPendingGCStressInstructionUpdate()); if (numberOfRegs != 0) { if (afterCallProtect[0]) { #if defined(TARGET_AMD64) regs->Rax = retValRegs[0]; #elif defined(TARGET_X86) regs->Eax = retValRegs[0]; #elif defined(TARGET_ARM) regs->R0 = retValRegs[0]; #elif defined(TARGET_ARM64) regs->X[0] = retValRegs[0]; #else PORTABILITY_ASSERT("DoGCStress - return register"); #endif } if (afterCallProtect[1]) { #if defined(TARGET_AMD64) && defined(TARGET_UNIX) regs->Rdx = retValRegs[numberOfRegs - 1]; #else // !TARGET_AMD64 || !TARGET_UNIX _ASSERTE(!"Not expected multi reg return with pointers."); #endif // !TARGET_AMD64 || !TARGET_UNIX } } #if !defined(USE_REDIRECT_FOR_GCSTRESS) frame.Pop(pThread); #endif // USE_REDIRECT_FOR_GCSTRESS if (enableWhenDone) { BOOL b = GC_ON_TRANSITIONS(FALSE); // Don't do a GCStress 3 GC here pThread->EnablePreemptiveGC(); GC_ON_TRANSITIONS(b); } return; } #endif // HAVE_GCCOVER
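The save/patch/restore pattern that the code above relies on is easiest to see in isolation. The following is a minimal, self-contained C++ sketch of that idea only, using hypothetical names (CoverageSketch, ArmSafepoint, DisarmSafepoint) and a plain byte buffer instead of real JITted code; the actual implementation must additionally make the code page writable (ExecutableWriterHolder), flush the instruction cache, and coordinate updates with EE suspension as the comments above describe.

#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical, simplified stand-in for GCCoverageInfo::savedCode: a saved
// copy of the original code bytes, indexed by method-relative offset.
struct CoverageSketch {
    std::vector<uint8_t> savedCode;
};

// 0xF4 is the x86 HLT opcode; the comments above note that the GC stress
// trap raises STATUS_PRIVILEGED_INSTRUCTION because of this HLT.
constexpr uint8_t kInterruptInstr = 0xF4;

// Arm a safepoint: remember the original byte, then write the trap opcode.
void ArmSafepoint(uint8_t* code, CoverageSketch& cover, size_t offset) {
    cover.savedCode[offset] = code[offset];
    code[offset] = kInterruptInstr;
}

// Disarm it again, mirroring what RemoveGcCoverageInterrupt does when the
// trap fires on a thread that should not be stressed (the real code also
// flushes the instruction cache after restoring the byte).
void DisarmSafepoint(uint8_t* code, const CoverageSketch& cover, size_t offset) {
    code[offset] = cover.savedCode[offset];
}

int main() {
    std::vector<uint8_t> fakeCode = { 0x90, 0x90, 0x90 };  // three NOPs standing in for JITted code
    CoverageSketch cover{ std::vector<uint8_t>(fakeCode.size()) };

    ArmSafepoint(fakeCode.data(), cover, 1);
    std::printf("patched byte:  0x%02X\n", fakeCode[1]);   // prints 0xF4
    DisarmSafepoint(fakeCode.data(), cover, 1);
    std::printf("restored byte: 0x%02X\n", fakeCode[1]);   // prints 0x90
    return 0;
}

The hard part in the file above is not this mechanism itself but deciding when it is safe to actually take the stress GC (thread GC mode, active inlined call frames, call boundaries) and how to update the patched instruction without racing other threads that may hit the same trap.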
./src/libraries/System.Configuration.ConfigurationManager/tests/Mono/SubclassTypeValidatorTest.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // System.Configuration.SubclassTypeValidatorTest.cs - Unit tests // for System.Configuration.SubclassTypeValidator. // // Author: // Chris Toshok <[email protected]> // // Copyright (C) 2005 Novell, Inc (http://www.novell.com) // // Permission is hereby granted, free of charge, to any person obtaining // a copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so, subject to // the following conditions: // // The above copyright notice and this permission notice shall be // included in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE // LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION // WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. using System; using System.Configuration; using Xunit; namespace MonoTests.System.Configuration { class A { } class B : A { } public class SubclassTypeValidatorTest { [Fact] public void CanValidate() { SubclassTypeValidator v = new SubclassTypeValidator(typeof(A)); Assert.False(v.CanValidate(typeof(string))); Assert.False(v.CanValidate(typeof(int))); Assert.False(v.CanValidate(typeof(object))); Assert.True(v.CanValidate(typeof(Type))); } [Fact] public void Success() { SubclassTypeValidator v = new SubclassTypeValidator(typeof(A)); v.Validate(typeof(B)); v.Validate(typeof(A)); } [Fact] public void Failure() { SubclassTypeValidator v = new SubclassTypeValidator(typeof(B)); AssertExtensions.Throws<ArgumentException>(null, () => v.Validate(typeof(A))); } } }
./src/mono/mono/tests/verifier/make_ret_test.sh
#! /bin/sh SED="sed" if [ `which gsed 2> /dev/null` ]; then SED="gsed" fi TEST_NAME=$1 TEST_VALIDITY=$2 TEST_TYPE1=$3 TEST_TYPE2=$4 TEST_NAME=${TEST_VALIDITY}_${TEST_NAME} TEST_FILE=${TEST_NAME}_generated.il echo $TEST_FILE TEST_TYPE1=`echo $TEST_TYPE1 | $SED -s 's/&/\\\&/'` TEST_TYPE2=`echo $TEST_TYPE2 | $SED -s 's/&/\\\&/'` $SED -e "s/VALIDITY/${TEST_VALIDITY}/g" -e "s/TYPE1/${TEST_TYPE1}/g" -e "s/TYPE2/${TEST_TYPE2}/g" > $TEST_FILE <<//EOF // VALIDITY CIL which breaks the ECMA-335 rules. // this CIL should fail verification by a conforming CLI verifier. .assembly '${TEST_NAME}_generated' { .hash algorithm 0x00008004 .ver 0:0:0:0 } .class ClassA extends [mscorlib]System.Object { } .class ClassB extends [mscorlib]System.Object { } .class ClassSubA extends ClassA { } .class interface abstract InterfaceA { } .class interface abstract InterfaceB { } .class ImplA extends [mscorlib]System.Object implements InterfaceA { } .class sealed MyValueType extends [mscorlib]System.ValueType { .field private int32 v } .class sealed MyValueType2 extends [mscorlib]System.ValueType { .field private int64 v } .class public Template extends [mscorlib]System.Object { .field public object foo } .class public Template\`1<T> extends [mscorlib]System.Object { } .class public Template\`2<T, U> extends [mscorlib]System.Object { } .class interface Bivariant\`2<+T,-U> extends [mscorlib]System.Object { } .class public BaseBase\`2<H,G> extends [mscorlib]System.Object { } .class public Base\`1<B> extends class BaseBase\`2<int32, !0> { } .class public SubClass1\`1<T> extends class Base\`1<!0> { } .class public SubClass2\`1<J> extends class Base\`1<!0> { } .class interface public Interface\`1<I> { } .class public InterfaceImpl\`1<M> implements class Interface\`1<!0> { } .class interface public ICovariant\`1<+T> { } .class interface public IContravariant\`1<-T> { } .class public CovariantImpl\`1<K> implements class ICovariant\`1<!0> { } .class public ContravariantImpl\`1<H> implements class IContravariant\`1<!0> { } .method public static TYPE1 Foo(TYPE2 V_0) cil managed { .maxstack 2 ldarg.0 ret // VALIDITY. } .method public static int32 Foo() cil managed { .entrypoint .maxstack 2 .locals init ( TYPE2 V) ldloc.0 call TYPE1 Foo(TYPE2) pop ldc.i4.0 ret // VALIDITY. } //EOF
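As a usage sketch (the argument values here are only illustrative, not taken from the test suite): invoking the script as `sh make_ret_test.sh ret_int32_int32 valid int32 int32` substitutes VALIDITY=valid and TYPE1=TYPE2=int32 into the template and, per the TEST_NAME and TEST_FILE assignments above, writes the generated IL to valid_ret_int32_int32_generated.il.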
./src/libraries/System.Security.Cryptography.Pkcs/src/System.Security.Cryptography.Pkcs.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <AllowUnsafeBlocks>true</AllowUnsafeBlocks> <IncludeDllSafeSearchPathAttribute>true</IncludeDllSafeSearchPathAttribute> <NoWarn>$(NoWarn);CA5384</NoWarn> <Nullable>enable</Nullable> <TargetFrameworks>$(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent);$(NetCoreAppMinimum)-windows;$(NetCoreAppMinimum);netstandard2.1;netstandard2.0;$(NetFrameworkMinimum)</TargetFrameworks> <IsPackable>true</IsPackable> <PackageDescription>Provides support for PKCS and CMS algorithms. Commonly Used Types: System.Security.Cryptography.Pkcs.EnvelopedCms</PackageDescription> </PropertyGroup> <!-- DesignTimeBuild requires all the TargetFramework Derived Properties to not be present in the first property group. --> <PropertyGroup> <IsPartialFacadeAssembly Condition="$([MSBuild]::GetTargetFrameworkIdentifier('$(TargetFramework)')) == '.NETFramework'">true</IsPartialFacadeAssembly> <OmitResources Condition="'$(IsPartialFacadeAssembly)' == 'true'">true</OmitResources> </PropertyGroup> <Import Project="$(CommonPath)System\Security\Cryptography\Asn1\AsnXml.targets" Condition="'$(IsPartialFacadeAssembly)' != 'true'" /> <Import Project="$(CommonPath)System\Security\Cryptography\Asn1Reader\System.Security.Cryptography.Asn1Reader.Shared.projitems" Condition="'$(IsPartialFacadeAssembly)' != 'true'" /> <ItemGroup Condition="'$(IsPartialFacadeAssembly)' != 'true'"> <!-- API types (platform independent) --> <Compile Include="System\Security\Cryptography\CryptographicAttributeObject.cs" /> <Compile Include="System\Security\Cryptography\CryptographicAttributeObjectCollection.cs" /> <Compile Include="System\Security\Cryptography\CryptographicAttributeObjectEnumerator.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\AlgorithmIdentifier.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\CmsRecipient.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\CmsRecipientCollection.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\CmsRecipientEnumerator.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\ContentInfo.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\EnvelopedCms.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\KeyAgreeRecipientInfo.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\KeyTransRecipientInfo.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs9AttributeObject.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs9ContentType.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs9DocumentDescription.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs9DocumentName.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs9MessageDigest.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs9SigningTime.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\PublicKeyInfo.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\RecipientInfo.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\RecipientInfoCollection.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\RecipientInfoEnumerator.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\RecipientInfoType.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\SubjectIdentifier.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\SubjectIdentifierOrKey.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\SubjectIdentifierOrKeyType.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\SubjectIdentifierType.cs" /> <Compile 
Include="System\Security\Cryptography\Xml\X509IssuerSerial.cs" /> <!-- Internal types (platform independent) --> <Compile Include="Internal\Cryptography\DecryptorPal.cs" /> <Compile Include="Internal\Cryptography\KeyAgreeRecipientInfoPal.cs" /> <Compile Include="Internal\Cryptography\KeyLengths.cs" /> <Compile Include="Internal\Cryptography\KeyTransRecipientInfoPal.cs" /> <Compile Include="Internal\Cryptography\PkcsHelpers.cs" /> <Compile Include="Internal\Cryptography\PkcsPal.cs" /> <Compile Include="Internal\Cryptography\RecipientInfoPal.cs" /> <Compile Include="$(CommonPath)System\HexConverter.cs" Link="Common\System\HexConverter.cs" /> <Compile Include="$(CommonPath)System\Security\Cryptography\Helpers.cs" Link="Common\System\Security\Cryptography\Helpers.cs" /> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\Pkcs7\ContentInfoAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\Pkcs7\ContentInfoAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\Pkcs7\ContentInfoAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\Pkcs7\ContentInfoAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\Pkcs7\ContentInfoAsn.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\Pkcs7\EncryptedContentInfoAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\Pkcs7\EncryptedContentInfoAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\Pkcs7\EncryptedContentInfoAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\Pkcs7\EncryptedContentInfoAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\Pkcs7\EncryptedContentInfoAsn.xml</DependentUpon> </Compile> <Compile Include="Internal\Cryptography\Pal\AnyOS\AsnHelpers.cs" /> <Compile Include="Internal\Cryptography\Pal\AnyOS\ManagedPal.cs" /> <Compile Include="Internal\Cryptography\Pal\AnyOS\ManagedPal.Asn.cs" /> <Compile Include="Internal\Cryptography\Pal\AnyOS\ManagedPal.Decode.cs" /> <Compile Include="Internal\Cryptography\Pal\AnyOS\ManagedPal.Decrypt.cs" /> <Compile Include="Internal\Cryptography\Pal\AnyOS\ManagedPal.Encrypt.cs" /> <Compile Include="Internal\Cryptography\Pal\AnyOS\ManagedPal.Exceptions.cs" /> <Compile Include="Internal\Cryptography\Pal\AnyOS\ManagedPal.KeyAgree.cs" /> <Compile Include="Internal\Cryptography\Pal\AnyOS\ManagedPal.KeyTrans.cs" /> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\EnvelopedDataAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\EnvelopedDataAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\EnvelopedDataAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\KeyAgreeRecipientIdentifierAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\KeyAgreeRecipientIdentifierAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\KeyAgreeRecipientIdentifierAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\KeyAgreeRecipientInfoAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\KeyAgreeRecipientInfoAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\KeyAgreeRecipientInfoAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\KeyTransRecipientInfoAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\KeyTransRecipientInfoAsn.xml.cs"> 
<DependentUpon>System\Security\Cryptography\Pkcs\Asn1\KeyTransRecipientInfoAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\OriginatorIdentifierOrKeyAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\OriginatorIdentifierOrKeyAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\OriginatorIdentifierOrKeyAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\OriginatorInfoAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\OriginatorInfoAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\OriginatorInfoAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\OriginatorPublicKeyAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\OriginatorPublicKeyAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\OriginatorPublicKeyAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\OtherKeyAttributeAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\OtherKeyAttributeAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\OtherKeyAttributeAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\RecipientEncryptedKeyAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\RecipientEncryptedKeyAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\RecipientEncryptedKeyAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\RecipientIdentifierAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\RecipientIdentifierAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\RecipientIdentifierAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\RecipientInfoAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\RecipientInfoAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\RecipientInfoAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\RecipientKeyIdentifier.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\RecipientKeyIdentifier.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\RecipientKeyIdentifier.xml</DependentUpon> </Compile> </ItemGroup> <ItemGroup Condition="$([MSBuild]::IsTargetFrameworkCompatible('$(TargetFramework)', 'net7.0'))"> <Compile Include="$(CommonPath)DisableRuntimeMarshalling.cs" Link="Common\DisableRuntimeMarshalling.cs" /> </ItemGroup> <ItemGroup Condition="'$(TargetPlatformIdentifier)' == 'windows'"> <!-- Internal types (platform: Windows) --> <Compile Include="Internal\Cryptography\Pal\Windows\AlgId.cs" /> <Compile Include="Internal\Cryptography\Pal\Windows\DecryptorPalWindows.cs" /> <Compile Include="Internal\Cryptography\Pal\Windows\DecryptorPalWindows.Decode.cs" /> <Compile Include="Internal\Cryptography\Pal\Windows\DecryptorPalWindows.DecodeRecipients.cs" /> <Compile Include="Internal\Cryptography\Pal\Windows\DecryptorPalWindows.Decrypt.cs" /> <Compile Include="Internal\Cryptography\Pal\Windows\HeapBlockRetainer.cs" /> <Compile Include="Internal\Cryptography\Pal\Windows\HelpersWindows.cs" /> <Compile Include="Internal\Cryptography\Pal\Windows\KeyAgreeRecipientInfoPalWindows.cs" /> <Compile Include="Internal\Cryptography\Pal\Windows\KeyTransRecipientInfoPalWindows.cs" /> <Compile Include="Internal\Cryptography\Pal\Windows\PkcsPal.Windows.cs" /> <Compile 
Include="Internal\Cryptography\Pal\Windows\PkcsPalWindows.cs" /> <Compile Include="Internal\Cryptography\Pal\Windows\PkcsPalWindows.Encrypt.cs" /> <Compile Include="Microsoft\Win32\SafeHandles\SafeCertContextHandle.cs" /> <Compile Include="Microsoft\Win32\SafeHandles\SafeCryptMsgHandle.cs" /> <Compile Include="Microsoft\Win32\SafeHandles\SafeHeapAllocHandle.cs" /> <Compile Include="Microsoft\Win32\SafeHandles\SafeProvOrNCryptKeyHandle.cs" /> <Compile Include="Microsoft\Win32\SafeHandles\SafeProvOrNCryptKeyHandleUwp.cs" /> <!-- Interop types (platform: Windows) --> <Compile Include="$(CommonPath)Interop\Windows\Advapi32\Interop.CryptAcquireContext.cs" Link="Common\Interop\Windows\Advapi32\Interop.CryptAcquireContext.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Advapi32\Interop.CryptDestroyHash.cs" Link="Common\Interop\Windows\Advapi32\Interop.CryptDestroyHash.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Advapi32\Interop.CryptDestroyKey.cs" Link="Common\Interop\Windows\Advapi32\Interop.CryptDestroyKey.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Advapi32\Interop.CryptGetProvParam.cs" Link="Common\Interop\Windows\Advapi32\Interop.CryptGetProvParam.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Advapi32\Interop.CryptReleaseContext.cs" Link="Common\Interop\Windows\Advapi32\Interop.CryptReleaseContext.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Advapi32\SafeHashHandle.cs" Link="Common\Interop\Windows\Advapi32\SafeHashHandle.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Advapi32\SafeKeyHandle.cs" Link="Common\Interop\Windows\Advapi32\SafeKeyHandle.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Advapi32\SafeProvHandle.cs" Link="Common\Interop\Windows\Advapi32\SafeProvHandle.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CERT_CONTEXT.cs" Link="Common\Interop\Windows\Crypt32\Interop.CERT_CONTEXT.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CERT_ID.cs" Link="Common\Interop\Windows\Crypt32\Interop.CERT_ID.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CERT_INFO.cs" Link="Common\Interop\Windows\Crypt32\Interop.CERT_INFO.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CERT_ISSUER_SERIAL_NUMBER.cs" Link="Common\Interop\Windows\Crypt32\Interop.CERT_ISSUER_SERIAL_NUMBER.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CERT_PUBLIC_KEY_INFO.cs" Link="Common\Interop\Windows\Crypt32\Interop.CERT_PUBLIC_KEY_INFO.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CertContextPropId.cs" Link="Common\Interop\Windows\Crypt32\Interop.CertContextPropId.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CertCreateCertificateContext.cs" Link="Common\Interop\Windows\Crypt32\Interop.CertCreateCertificateContext.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CertDuplicateCertificateContext_IntPtr.cs" Link="Common\Interop\Windows\Crypt32\Interop.CertDuplicateCertificateContext_IntPtr.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CertFreeCertificateContext.cs" Link="Common\Interop\Windows\Crypt32\Interop.CertFreeCertificateContext.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CertGetCertificateContextProperty.cs" Link="Common\Interop\Windows\Crypt32\Interop.CertGetCertificateContextProperty.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CertGetPublicKeyLength.cs" 
Link="Common\Interop\Windows\Crypt32\Interop.CertGetPublicKeyLength.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CertIdChoice.cs" Link="Common\Interop\Windows\Crypt32\Interop.CertIdChoice.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CertNameStrTypeAndFlags.cs" Link="Common\Interop\Windows\Crypt32\Interop.CertNameStrTypeAndFlags.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CertNameToStr.cs" Link="Common\Interop\Windows\Crypt32\Interop.CertNameToStr.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CMSG_CMS_RECIPIENT_INFO.cs" Link="Common\Interop\Windows\Crypt32\Interop.CMSG_CMS_RECIPIENT_INFO.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CMSG_CTRL_DECRYPT_PARA.cs" Link="Common\Interop\Windows\Crypt32\Interop.CMSG_CTRL_DECRYPT_PARA.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CMSG_CTRL_KEY_AGREE_DECRYPT_PARA.cs" Link="Common\Interop\Windows\Crypt32\Interop.CMSG_CTRL_KEY_AGREE_DECRYPT_PARA.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CMSG_ENVELOPED_ENCODE_INFO.cs" Link="Common\Interop\Windows\Crypt32\Interop.CMSG_ENVELOPED_ENCODE_INFO.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CMSG_KEY_AGREE_RECIPIENT_ENCODE_INFO.cs" Link="Common\Interop\Windows\Crypt32\Interop.CMSG_KEY_AGREE_RECIPIENT_ENCODE_INFO.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CMSG_KEY_AGREE_RECIPIENT_INFO.cs" Link="Common\Interop\Windows\Crypt32\Interop.CMSG_KEY_AGREE_RECIPIENT_INFO.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CMSG_KEY_TRANS_RECIPIENT_ENCODE_INFO.cs" Link="Common\Interop\Windows\Crypt32\Interop.CMSG_KEY_TRANS_RECIPIENT_ENCODE_INFO.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CMSG_KEY_TRANS_RECIPIENT_INFO.cs" Link="Common\Interop\Windows\Crypt32\Interop.CMSG_KEY_TRANS_RECIPIENT_INFO.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CMSG_RC2_AUX_INFO.cs" Link="Common\Interop\Windows\Crypt32\Interop.CMSG_RC2_AUX_INFO.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CMSG_RECIPIENT_ENCODE_INFO.cs" Link="Common\Interop\Windows\Crypt32\Interop.CMSG_RECIPIENT_ENCODE_INFO.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CMSG_RECIPIENT_ENCRYPTED_KEY_ENCODE_INFO.cs" Link="Common\Interop\Windows\Crypt32\Interop.CMSG_RECIPIENT_ENCRYPTED_KEY_ENCODE_INFO.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CMSG_RECIPIENT_ENCRYPTED_KEY_INFO.cs" Link="Common\Interop\Windows\Crypt32\Interop.CMSG_RECIPIENT_ENCRYPTED_KEY_INFO.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CMsgCmsRecipientChoice.cs" Link="Common\Interop\Windows\Crypt32\Interop.CMsgCmsRecipientChoice.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CMsgKeyAgreeOriginatorChoice.cs" Link="Common\Interop\Windows\Crypt32\Interop.CMsgKeyAgreeOriginatorChoice.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CmsKeyAgreeKeyChoice.cs" Link="Common\Interop\Windows\Crypt32\Interop.CmsKeyAgreeKeyChoice.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CRYPT_ALGORITHM_IDENTIFIER.cs" Link="Common\Interop\Windows\Crypt32\Interop.CRYPT_ALGORITHM_IDENTIFIER.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CRYPT_ATTRIBUTE.cs" Link="Common\Interop\Windows\Crypt32\Interop.CRYPT_ATTRIBUTE.cs" /> <Compile 
Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CRYPT_ATTRIBUTE_TYPE_VALUE.cs" Link="Common\Interop\Windows\Crypt32\Interop.CRYPT_ATTRIBUTE_TYPE_VALUE.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CRYPT_ATTRIBUTES.cs" Link="Common\Interop\Windows\Crypt32\Interop.CRYPT_ATTRIBUTES.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CRYPT_BIT_BLOB.cs" Link="Common\Interop\Windows\Crypt32\Interop.CRYPT_BIT_BLOB.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CRYPT_KEY_PROV_INFO.cs" Link="Common\Interop\Windows\Crypt32\Interop.CRYPT_KEY_PROV_INFO.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CRYPT_RC2_CBC_PARAMETERS.cs" Link="Common\Interop\Windows\Crypt32\Interop.CRYPT_RC2_CBC_PARAMETERS.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptAcquireCertificatePrivateKey.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptAcquireCertificatePrivateKey.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptAcquireCertificatePrivateKeyFlags.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptAcquireCertificatePrivateKeyFlags.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptDecodeObject.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptDecodeObject.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptDecodeObjectStructType.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptDecodeObjectStructType.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptEncodeDecodeWrappers.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptEncodeDecodeWrappers.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptEncodeObject.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptEncodeObject.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptKeySpec.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptKeySpec.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptMsgClose.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptMsgClose.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptMsgControl.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptMsgControl.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptMsgGetParam.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptMsgGetParam.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptMsgOpenToDecode.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptMsgOpenToDecode.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptMsgOpenToEncode.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptMsgOpenToEncode.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptMsgParamType.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptMsgParamType.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptMsgType.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptMsgType.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptMsgUpdate.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptMsgUpdate.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptRc2Version.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptRc2Version.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.DATA_BLOB.cs" Link="Common\Interop\Windows\Crypt32\Interop.DATA_BLOB.cs" /> <Compile 
Include="$(CommonPath)Interop\Windows\Crypt32\Interop.ErrorCode.cs" Link="Common\Interop\Windows\Crypt32\Interop.ErrorCode.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.FindOidInfo.cs" Link="Common\Interop\Windows\Crypt32\Interop.FindOidInfo.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.MsgControlType.cs" Link="Common\Interop\Windows\Crypt32\Interop.MsgControlType.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.MsgEncodingType.cs" Link="Common\Interop\Windows\Crypt32\Interop.MsgEncodingType.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Kernel32\Interop.FormatMessage.cs" Link="Common\Interop\Windows\kernel32\Interop.FormatMessage.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Kernel32\Interop.Heap.cs" Link="Common\Interop\Windows\kernel32\Interop.Heap.cs" /> <Compile Include="$(CommonPath)Interop\Windows\NCrypt\Interop.ErrorCode.cs" Link="Common\Interop\Windows\NCrypt\Interop.ErrorCode.cs" /> <Compile Include="$(CommonPath)Interop\Windows\NCrypt\Interop.Properties.cs" Link="Common\Interop\Windows\NCrypt\Interop.Properties.cs" /> <Compile Include="$(CommonPath)Interop\Windows\NCrypt\Interop.NCryptFreeObject.cs" Link="Common\Interop\Windows\NCrypt\Interop.NCryptFreeObject.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Interop.Libraries.cs" Link="Common\Interop\Windows\Interop.Libraries.cs" /> <!-- Common types (platform: Windows) --> <Compile Include="$(CommonPath)System\Security\Cryptography\CryptoThrowHelper.Windows.cs" Link="Common\System\Security\Cryptography\CryptoThrowHelper.Windows.cs" /> <Compile Include="$(CommonPath)Microsoft\Win32\SafeHandles\SafeHandleCache.cs" Link="Common\Microsoft\Win32\SafeHandles\SafeHandleCache.cs" /> </ItemGroup> <!-- Internal types (platform: AnyOS) --> <ItemGroup Condition="'$(TargetPlatformIdentifier)' != 'windows' and '$(IsPartialFacadeAssembly)' != 'true'"> <Compile Include="Internal\Cryptography\Pal\AnyOS\PkcsPal.AnyOS.cs" /> </ItemGroup> <ItemGroup Condition="'$(IsPartialFacadeAssembly)' != 'true'"> <Compile Include="$(CommonPath)System\Obsoletions.cs" Link="Common\System\Obsoletions.cs" /> <Compile Include="$(CommonPath)System\Memory\PointerMemoryManager.cs" Link="Common\System\Memory\PointerMemoryManager.cs" /> <Compile Include="$(CommonPath)System\Security\Cryptography\CryptoPool.cs" Link="Common\System\Security\Cryptography\CryptoPool.cs" /> <Compile Include="$(CommonPath)System\Security\Cryptography\Oids.cs" Link="Common\System\Security\Cryptography\Oids.cs" /> <Compile Include="$(CommonPath)System\Security\Cryptography\Oids.Shared.cs" Link="Common\System\Security\Cryptography\Oids.Shared.cs" /> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\AlgorithmIdentifierAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\AlgorithmIdentifierAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\AlgorithmIdentifierAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\AlgorithmIdentifierAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\AlgorithmIdentifierAsn.xml</DependentUpon> </Compile> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\AlgorithmIdentifierAsn.manual.cs"> <Link>Common\System\Security\Cryptography\Asn1\AlgorithmIdentifierAsn.manual.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\AlgorithmIdentifierAsn.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\AttributeAsn.xml"> 
<Link>Common\System\Security\Cryptography\Asn1\AttributeAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\AttributeAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\AttributeAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\AttributeAsn.xml</DependentUpon> </Compile> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\AttributeAsn.manual.cs"> <Link>Common\System\Security\Cryptography\Asn1\AttributeAsn.manual.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\AttributeAsn.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\EncryptedPrivateKeyInfoAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\EncryptedPrivateKeyInfoAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\EncryptedPrivateKeyInfoAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\EncryptedPrivateKeyInfoAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\EncryptedPrivateKeyInfoAsn.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\DirectoryStringAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\DirectoryStringAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\DirectoryStringAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\DirectoryStringAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\DirectoryStringAsn.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\EdiPartyNameAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\EdiPartyNameAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\EdiPartyNameAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\EdiPartyNameAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\EdiPartyNameAsn.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\GeneralNameAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\GeneralNameAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\GeneralNameAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\GeneralNameAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\GeneralNameAsn.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\OtherNameAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\OtherNameAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\OtherNameAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\OtherNameAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\OtherNameAsn.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\PBEParameter.xml"> <Link>Common\System\Security\Cryptography\Asn1\PBEParameter.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\PBEParameter.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\PBEParameter.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\PBEParameter.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\PBES2Params.xml"> <Link>Common\System\Security\Cryptography\Asn1\PBES2Params.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\PBES2Params.xml.cs"> 
<Link>Common\System\Security\Cryptography\Asn1\PBES2Params.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\PBES2Params.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\Pbkdf2Params.xml"> <Link>Common\System\Security\Cryptography\Asn1\Pbkdf2Params.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\Pbkdf2Params.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\Pbkdf2Params.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\Pbkdf2Params.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\Pbkdf2SaltChoice.xml"> <Link>Common\System\Security\Cryptography\Asn1\Pbkdf2SaltChoice.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\Pbkdf2SaltChoice.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\Pbkdf2SaltChoice.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\Pbkdf2SaltChoice.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\PssParamsAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\PssParamsAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\PssParamsAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\PssParamsAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\PssParamsAsn.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\OaepParamsAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\OaepParamsAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\OaepParamsAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\OaepParamsAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\OaepParamsAsn.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\PrivateKeyInfoAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\PrivateKeyInfoAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\PrivateKeyInfoAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\PrivateKeyInfoAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\PrivateKeyInfoAsn.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\Rc2CbcParameters.xml"> <Link>Common\System\Security\Cryptography\Asn1\Rc2CbcParameters.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\Rc2CbcParameters.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\Rc2CbcParameters.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\Rc2CbcParameters.xml</DependentUpon> </Compile> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\Rc2CbcParameters.manual.cs"> <Link>Common\System\Security\Cryptography\Asn1\Rc2CbcParameters.manual.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\Rc2CbcParameters.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\SubjectPublicKeyInfoAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\SubjectPublicKeyInfoAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\SubjectPublicKeyInfoAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\SubjectPublicKeyInfoAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\SubjectPublicKeyInfoAsn.xml</DependentUpon> </Compile> 
<AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\X509ExtensionAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\X509ExtensionAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\X509ExtensionAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\X509ExtensionAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\X509ExtensionAsn.xml</DependentUpon> </Compile> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\X509ExtensionAsn.manual.cs"> <Link>Common\System\Security\Cryptography\Asn1\X509ExtensionAsn.manual.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\X509ExtensionAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\CadesIssuerSerial.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\CadesIssuerSerial.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\CadesIssuerSerial.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\CertificateChoiceAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\CertificateChoiceAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\CertificateChoiceAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\EncapsulatedContentInfoAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\EncapsulatedContentInfoAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\EncapsulatedContentInfoAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\EssCertId.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\EssCertId.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\EssCertId.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\EssCertIdV2.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\EssCertIdV2.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\EssCertIdV2.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\IssuerAndSerialNumberAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\IssuerAndSerialNumberAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\IssuerAndSerialNumberAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\MessageImprint.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\MessageImprint.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\MessageImprint.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\OtherCertificateFormat.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\OtherCertificateFormat.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\OtherCertificateFormat.xml</DependentUpon> </Compile> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\PkiFailureInfo.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\PkiStatus.cs" /> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\PkiStatusInfo.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\PkiStatusInfo.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\PkiStatusInfo.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\PolicyInformation.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\PolicyInformation.xml.cs"> 
<DependentUpon>System\Security\Cryptography\Pkcs\Asn1\PolicyInformation.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\PolicyQualifierInfo.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\PolicyQualifierInfo.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\PolicyQualifierInfo.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\Rfc3161Accuracy.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\Rfc3161Accuracy.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\Rfc3161Accuracy.xml</DependentUpon> </Compile> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\Rfc3161Accuracy.manual.cs" /> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\Rfc3161TimeStampReq.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\Rfc3161TimeStampReq.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\Rfc3161TimeStampReq.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\Rfc3161TimeStampResp.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\Rfc3161TimeStampResp.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\Rfc3161TimeStampResp.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\Rfc3161TstInfo.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\Rfc3161TstInfo.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\Rfc3161TstInfo.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\SignedAttributesSet.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\SignedAttributesSet.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\SignedAttributesSet.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\SignedDataAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\SignedDataAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\SignedDataAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\SignerIdentifierAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\SignerIdentifierAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\SignerIdentifierAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\SignerInfoAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\SignerInfoAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\SignerInfoAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\SigningCertificateAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\SigningCertificateAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\SignerInfoAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\SigningCertificateV2Asn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\SigningCertificateV2Asn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\SigningCertificateV2Asn.xml</DependentUpon> </Compile> <Compile Include="System\Security\Cryptography\Pkcs\CmsSignature.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\CmsSignature.ECDsa.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\CmsSignature.RSA.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\CmsSigner.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\SignedCms.cs" /> <Compile 
Include="System\Security\Cryptography\Pkcs\SignedCms.CtorOverloads.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\SignerInfo.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\SignerInfoCollection.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\SignerInfoEnumerator.cs" /> </ItemGroup> <ItemGroup Condition="$([MSBuild]::IsTargetFrameworkCompatible('$(TargetFramework)', 'netstandard2.1'))"> <Compile Include="$(CommonPath)System\Security\Cryptography\KeyFormatHelper.cs" Link="Common\System\Security\Cryptography\KeyFormatHelper.cs" /> <Compile Include="$(CommonPath)System\Security\Cryptography\KeyFormatHelper.Encrypted.cs" Link="Common\System\Security\Cryptography\KeyFormatHelper.Encrypted.cs" /> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\DigestInfoAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\DigestInfoAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\DigestInfoAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\DigestInfoAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\DigestInfoAsn.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\Pkcs12\CertBagAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\Pkcs12\CertBagAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\Pkcs12\CertBagAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\Pkcs12\CertBagAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\Pkcs12\CertBagAsn.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\Pkcs12\MacData.xml"> <Link>Common\System\Security\Cryptography\Asn1\Pkcs12\MacData.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\Pkcs12\MacData.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\Pkcs12\MacData.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\Pkcs12\MacData.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\Pkcs12\PfxAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\Pkcs12\PfxAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\Pkcs12\PfxAsn.manual.cs"> <Link>Common\System\Security\Cryptography\Asn1\Pkcs12\PfxAsn.manual.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\Pkcs12\PfxAsn.xml</DependentUpon> </Compile> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\Pkcs12\PfxAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\Pkcs12\PfxAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\Pkcs12\PfxAsn.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\Pkcs12\SafeBagAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\Pkcs12\SafeBagAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\Pkcs12\SafeBagAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\Pkcs12\SafeBagAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\Pkcs12\SafeBagAsn.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\Pkcs7\EncryptedDataAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\Pkcs7\EncryptedDataAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\Pkcs7\EncryptedDataAsn.xml.cs"> 
<Link>Common\System\Security\Cryptography\Asn1\Pkcs7\EncryptedDataAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\Pkcs7\EncryptedDataAsn.xml</DependentUpon> </Compile> <Compile Include="$(CommonPath)System\Security\Cryptography\PasswordBasedEncryption.cs" Link="Common\System\Security\Cryptography\PasswordBasedEncryption.cs" /> <Compile Include="$(CommonPath)System\Security\Cryptography\Pkcs12Kdf.cs" Link="Common\System\Security\Cryptography\Pkcs12Kdf.cs" /> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\SecretBagAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\SecretBagAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\SecretBagAsn.xml</DependentUpon> </Compile> <Compile Include="System\Security\Cryptography\Pkcs\CmsSignature.DSA.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs12Builder.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs12CertBag.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs12ConfidentialityMode.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs12Info.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs12IntegrityMode.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs12KeyBag.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs12SafeBag.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs12SafeContents.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs12SafeContentsBag.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs12SecretBag.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs12ShroudedKeyBag.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs8PrivateKeyInfo.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs9LocalKeyId.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Rfc3161RequestResponseStatus.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Rfc3161TimestampRequest.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Rfc3161TimestampToken.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Rfc3161TimestampTokenInfo.cs" /> </ItemGroup> <ItemGroup> <None Include="@(AsnXml)" /> </ItemGroup> <ItemGroup Condition="'$(IsPartialFacadeAssembly)' != 'true'"> <ProjectReference Include="$(LibrariesProjectRoot)System.Formats.Asn1\src\System.Formats.Asn1.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetFrameworkIdentifier)' == '.NETCoreApp'"> <Reference Include="System.Buffers" /> <Reference Include="System.Collections" /> <Reference Include="System.Collections.Concurrent" /> <Reference Include="System.Collections.NonGeneric" /> <Reference Include="System.Diagnostics.Debug" /> <Reference Include="System.Diagnostics.Tools" /> <Reference Include="System.Linq" /> <Reference Include="System.Memory" /> <Reference Include="System.Resources.ResourceManager" /> <Reference Include="System.Runtime" /> <Reference Include="System.Runtime.CompilerServices.Unsafe" /> <Reference Include="System.Runtime.Extensions" /> <Reference Include="System.Runtime.InteropServices" /> <Reference Include="System.Runtime.InteropServices.RuntimeInformation" /> <Reference Include="System.Runtime.Numerics" /> <Reference Include="System.Security.Cryptography.Algorithms" /> <Reference Include="System.Security.Cryptography.Cng" /> <Reference Include="System.Security.Cryptography.Csp" /> <Reference Include="System.Security.Cryptography.Encoding" /> <Reference Include="System.Security.Cryptography.Primitives" /> <Reference 
Include="System.Security.Cryptography.X509Certificates" /> <Reference Include="System.Text.Encoding.Extensions" /> <Reference Include="System.Threading" /> </ItemGroup> <ItemGroup Condition="$([MSBuild]::IsTargetFrameworkCompatible('$(TargetFramework)', 'net7.0'))"> <Reference Include="System.Security.Cryptography" /> </ItemGroup> <ItemGroup Condition="'$(TargetFrameworkIdentifier)' == '.NETStandard'"> <PackageReference Include="System.Security.Cryptography.Cng" Version="$(SystemSecurityCryptographyCngVersion)" /> </ItemGroup> <ItemGroup Condition="$(TargetFramework.StartsWith('netstandard2.0'))"> <PackageReference Include="System.Buffers" Version="$(SystemBuffersVersion)" /> <PackageReference Include="System.Memory" Version="$(SystemMemoryVersion)" /> </ItemGroup> <ItemGroup Condition="'$(TargetFrameworkIdentifier)' == '.NETFramework'"> <Reference Include="System.Security" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <AllowUnsafeBlocks>true</AllowUnsafeBlocks> <IncludeDllSafeSearchPathAttribute>true</IncludeDllSafeSearchPathAttribute> <NoWarn>$(NoWarn);CA5384</NoWarn> <Nullable>enable</Nullable> <TargetFrameworks>$(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent);$(NetCoreAppMinimum)-windows;$(NetCoreAppMinimum);netstandard2.1;netstandard2.0;$(NetFrameworkMinimum)</TargetFrameworks> <IsPackable>true</IsPackable> <PackageDescription>Provides support for PKCS and CMS algorithms. Commonly Used Types: System.Security.Cryptography.Pkcs.EnvelopedCms</PackageDescription> </PropertyGroup> <!-- DesignTimeBuild requires all the TargetFramework Derived Properties to not be present in the first property group. --> <PropertyGroup> <IsPartialFacadeAssembly Condition="$([MSBuild]::GetTargetFrameworkIdentifier('$(TargetFramework)')) == '.NETFramework'">true</IsPartialFacadeAssembly> <OmitResources Condition="'$(IsPartialFacadeAssembly)' == 'true'">true</OmitResources> </PropertyGroup> <Import Project="$(CommonPath)System\Security\Cryptography\Asn1\AsnXml.targets" Condition="'$(IsPartialFacadeAssembly)' != 'true'" /> <Import Project="$(CommonPath)System\Security\Cryptography\Asn1Reader\System.Security.Cryptography.Asn1Reader.Shared.projitems" Condition="'$(IsPartialFacadeAssembly)' != 'true'" /> <ItemGroup Condition="'$(IsPartialFacadeAssembly)' != 'true'"> <!-- API types (platform independent) --> <Compile Include="System\Security\Cryptography\CryptographicAttributeObject.cs" /> <Compile Include="System\Security\Cryptography\CryptographicAttributeObjectCollection.cs" /> <Compile Include="System\Security\Cryptography\CryptographicAttributeObjectEnumerator.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\AlgorithmIdentifier.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\CmsRecipient.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\CmsRecipientCollection.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\CmsRecipientEnumerator.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\ContentInfo.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\EnvelopedCms.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\KeyAgreeRecipientInfo.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\KeyTransRecipientInfo.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs9AttributeObject.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs9ContentType.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs9DocumentDescription.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs9DocumentName.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs9MessageDigest.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs9SigningTime.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\PublicKeyInfo.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\RecipientInfo.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\RecipientInfoCollection.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\RecipientInfoEnumerator.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\RecipientInfoType.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\SubjectIdentifier.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\SubjectIdentifierOrKey.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\SubjectIdentifierOrKeyType.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\SubjectIdentifierType.cs" /> <Compile 
Include="System\Security\Cryptography\Xml\X509IssuerSerial.cs" /> <!-- Internal types (platform independent) --> <Compile Include="Internal\Cryptography\DecryptorPal.cs" /> <Compile Include="Internal\Cryptography\KeyAgreeRecipientInfoPal.cs" /> <Compile Include="Internal\Cryptography\KeyLengths.cs" /> <Compile Include="Internal\Cryptography\KeyTransRecipientInfoPal.cs" /> <Compile Include="Internal\Cryptography\PkcsHelpers.cs" /> <Compile Include="Internal\Cryptography\PkcsPal.cs" /> <Compile Include="Internal\Cryptography\RecipientInfoPal.cs" /> <Compile Include="$(CommonPath)System\HexConverter.cs" Link="Common\System\HexConverter.cs" /> <Compile Include="$(CommonPath)System\Security\Cryptography\Helpers.cs" Link="Common\System\Security\Cryptography\Helpers.cs" /> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\Pkcs7\ContentInfoAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\Pkcs7\ContentInfoAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\Pkcs7\ContentInfoAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\Pkcs7\ContentInfoAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\Pkcs7\ContentInfoAsn.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\Pkcs7\EncryptedContentInfoAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\Pkcs7\EncryptedContentInfoAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\Pkcs7\EncryptedContentInfoAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\Pkcs7\EncryptedContentInfoAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\Pkcs7\EncryptedContentInfoAsn.xml</DependentUpon> </Compile> <Compile Include="Internal\Cryptography\Pal\AnyOS\AsnHelpers.cs" /> <Compile Include="Internal\Cryptography\Pal\AnyOS\ManagedPal.cs" /> <Compile Include="Internal\Cryptography\Pal\AnyOS\ManagedPal.Asn.cs" /> <Compile Include="Internal\Cryptography\Pal\AnyOS\ManagedPal.Decode.cs" /> <Compile Include="Internal\Cryptography\Pal\AnyOS\ManagedPal.Decrypt.cs" /> <Compile Include="Internal\Cryptography\Pal\AnyOS\ManagedPal.Encrypt.cs" /> <Compile Include="Internal\Cryptography\Pal\AnyOS\ManagedPal.Exceptions.cs" /> <Compile Include="Internal\Cryptography\Pal\AnyOS\ManagedPal.KeyAgree.cs" /> <Compile Include="Internal\Cryptography\Pal\AnyOS\ManagedPal.KeyTrans.cs" /> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\EnvelopedDataAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\EnvelopedDataAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\EnvelopedDataAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\KeyAgreeRecipientIdentifierAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\KeyAgreeRecipientIdentifierAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\KeyAgreeRecipientIdentifierAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\KeyAgreeRecipientInfoAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\KeyAgreeRecipientInfoAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\KeyAgreeRecipientInfoAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\KeyTransRecipientInfoAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\KeyTransRecipientInfoAsn.xml.cs"> 
<DependentUpon>System\Security\Cryptography\Pkcs\Asn1\KeyTransRecipientInfoAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\OriginatorIdentifierOrKeyAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\OriginatorIdentifierOrKeyAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\OriginatorIdentifierOrKeyAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\OriginatorInfoAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\OriginatorInfoAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\OriginatorInfoAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\OriginatorPublicKeyAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\OriginatorPublicKeyAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\OriginatorPublicKeyAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\OtherKeyAttributeAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\OtherKeyAttributeAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\OtherKeyAttributeAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\RecipientEncryptedKeyAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\RecipientEncryptedKeyAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\RecipientEncryptedKeyAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\RecipientIdentifierAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\RecipientIdentifierAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\RecipientIdentifierAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\RecipientInfoAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\RecipientInfoAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\RecipientInfoAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\RecipientKeyIdentifier.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\RecipientKeyIdentifier.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\RecipientKeyIdentifier.xml</DependentUpon> </Compile> </ItemGroup> <ItemGroup Condition="$([MSBuild]::IsTargetFrameworkCompatible('$(TargetFramework)', 'net7.0'))"> <Compile Include="$(CommonPath)DisableRuntimeMarshalling.cs" Link="Common\DisableRuntimeMarshalling.cs" /> </ItemGroup> <ItemGroup Condition="'$(TargetPlatformIdentifier)' == 'windows'"> <!-- Internal types (platform: Windows) --> <Compile Include="Internal\Cryptography\Pal\Windows\AlgId.cs" /> <Compile Include="Internal\Cryptography\Pal\Windows\DecryptorPalWindows.cs" /> <Compile Include="Internal\Cryptography\Pal\Windows\DecryptorPalWindows.Decode.cs" /> <Compile Include="Internal\Cryptography\Pal\Windows\DecryptorPalWindows.DecodeRecipients.cs" /> <Compile Include="Internal\Cryptography\Pal\Windows\DecryptorPalWindows.Decrypt.cs" /> <Compile Include="Internal\Cryptography\Pal\Windows\HeapBlockRetainer.cs" /> <Compile Include="Internal\Cryptography\Pal\Windows\HelpersWindows.cs" /> <Compile Include="Internal\Cryptography\Pal\Windows\KeyAgreeRecipientInfoPalWindows.cs" /> <Compile Include="Internal\Cryptography\Pal\Windows\KeyTransRecipientInfoPalWindows.cs" /> <Compile Include="Internal\Cryptography\Pal\Windows\PkcsPal.Windows.cs" /> <Compile 
Include="Internal\Cryptography\Pal\Windows\PkcsPalWindows.cs" /> <Compile Include="Internal\Cryptography\Pal\Windows\PkcsPalWindows.Encrypt.cs" /> <Compile Include="Microsoft\Win32\SafeHandles\SafeCertContextHandle.cs" /> <Compile Include="Microsoft\Win32\SafeHandles\SafeCryptMsgHandle.cs" /> <Compile Include="Microsoft\Win32\SafeHandles\SafeHeapAllocHandle.cs" /> <Compile Include="Microsoft\Win32\SafeHandles\SafeProvOrNCryptKeyHandle.cs" /> <Compile Include="Microsoft\Win32\SafeHandles\SafeProvOrNCryptKeyHandleUwp.cs" /> <!-- Interop types (platform: Windows) --> <Compile Include="$(CommonPath)Interop\Windows\Advapi32\Interop.CryptAcquireContext.cs" Link="Common\Interop\Windows\Advapi32\Interop.CryptAcquireContext.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Advapi32\Interop.CryptDestroyHash.cs" Link="Common\Interop\Windows\Advapi32\Interop.CryptDestroyHash.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Advapi32\Interop.CryptDestroyKey.cs" Link="Common\Interop\Windows\Advapi32\Interop.CryptDestroyKey.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Advapi32\Interop.CryptGetProvParam.cs" Link="Common\Interop\Windows\Advapi32\Interop.CryptGetProvParam.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Advapi32\Interop.CryptReleaseContext.cs" Link="Common\Interop\Windows\Advapi32\Interop.CryptReleaseContext.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Advapi32\SafeHashHandle.cs" Link="Common\Interop\Windows\Advapi32\SafeHashHandle.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Advapi32\SafeKeyHandle.cs" Link="Common\Interop\Windows\Advapi32\SafeKeyHandle.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Advapi32\SafeProvHandle.cs" Link="Common\Interop\Windows\Advapi32\SafeProvHandle.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CERT_CONTEXT.cs" Link="Common\Interop\Windows\Crypt32\Interop.CERT_CONTEXT.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CERT_ID.cs" Link="Common\Interop\Windows\Crypt32\Interop.CERT_ID.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CERT_INFO.cs" Link="Common\Interop\Windows\Crypt32\Interop.CERT_INFO.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CERT_ISSUER_SERIAL_NUMBER.cs" Link="Common\Interop\Windows\Crypt32\Interop.CERT_ISSUER_SERIAL_NUMBER.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CERT_PUBLIC_KEY_INFO.cs" Link="Common\Interop\Windows\Crypt32\Interop.CERT_PUBLIC_KEY_INFO.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CertContextPropId.cs" Link="Common\Interop\Windows\Crypt32\Interop.CertContextPropId.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CertCreateCertificateContext.cs" Link="Common\Interop\Windows\Crypt32\Interop.CertCreateCertificateContext.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CertDuplicateCertificateContext_IntPtr.cs" Link="Common\Interop\Windows\Crypt32\Interop.CertDuplicateCertificateContext_IntPtr.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CertFreeCertificateContext.cs" Link="Common\Interop\Windows\Crypt32\Interop.CertFreeCertificateContext.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CertGetCertificateContextProperty.cs" Link="Common\Interop\Windows\Crypt32\Interop.CertGetCertificateContextProperty.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CertGetPublicKeyLength.cs" 
Link="Common\Interop\Windows\Crypt32\Interop.CertGetPublicKeyLength.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CertIdChoice.cs" Link="Common\Interop\Windows\Crypt32\Interop.CertIdChoice.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CertNameStrTypeAndFlags.cs" Link="Common\Interop\Windows\Crypt32\Interop.CertNameStrTypeAndFlags.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CertNameToStr.cs" Link="Common\Interop\Windows\Crypt32\Interop.CertNameToStr.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CMSG_CMS_RECIPIENT_INFO.cs" Link="Common\Interop\Windows\Crypt32\Interop.CMSG_CMS_RECIPIENT_INFO.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CMSG_CTRL_DECRYPT_PARA.cs" Link="Common\Interop\Windows\Crypt32\Interop.CMSG_CTRL_DECRYPT_PARA.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CMSG_CTRL_KEY_AGREE_DECRYPT_PARA.cs" Link="Common\Interop\Windows\Crypt32\Interop.CMSG_CTRL_KEY_AGREE_DECRYPT_PARA.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CMSG_ENVELOPED_ENCODE_INFO.cs" Link="Common\Interop\Windows\Crypt32\Interop.CMSG_ENVELOPED_ENCODE_INFO.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CMSG_KEY_AGREE_RECIPIENT_ENCODE_INFO.cs" Link="Common\Interop\Windows\Crypt32\Interop.CMSG_KEY_AGREE_RECIPIENT_ENCODE_INFO.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CMSG_KEY_AGREE_RECIPIENT_INFO.cs" Link="Common\Interop\Windows\Crypt32\Interop.CMSG_KEY_AGREE_RECIPIENT_INFO.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CMSG_KEY_TRANS_RECIPIENT_ENCODE_INFO.cs" Link="Common\Interop\Windows\Crypt32\Interop.CMSG_KEY_TRANS_RECIPIENT_ENCODE_INFO.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CMSG_KEY_TRANS_RECIPIENT_INFO.cs" Link="Common\Interop\Windows\Crypt32\Interop.CMSG_KEY_TRANS_RECIPIENT_INFO.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CMSG_RC2_AUX_INFO.cs" Link="Common\Interop\Windows\Crypt32\Interop.CMSG_RC2_AUX_INFO.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CMSG_RECIPIENT_ENCODE_INFO.cs" Link="Common\Interop\Windows\Crypt32\Interop.CMSG_RECIPIENT_ENCODE_INFO.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CMSG_RECIPIENT_ENCRYPTED_KEY_ENCODE_INFO.cs" Link="Common\Interop\Windows\Crypt32\Interop.CMSG_RECIPIENT_ENCRYPTED_KEY_ENCODE_INFO.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CMSG_RECIPIENT_ENCRYPTED_KEY_INFO.cs" Link="Common\Interop\Windows\Crypt32\Interop.CMSG_RECIPIENT_ENCRYPTED_KEY_INFO.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CMsgCmsRecipientChoice.cs" Link="Common\Interop\Windows\Crypt32\Interop.CMsgCmsRecipientChoice.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CMsgKeyAgreeOriginatorChoice.cs" Link="Common\Interop\Windows\Crypt32\Interop.CMsgKeyAgreeOriginatorChoice.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CmsKeyAgreeKeyChoice.cs" Link="Common\Interop\Windows\Crypt32\Interop.CmsKeyAgreeKeyChoice.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CRYPT_ALGORITHM_IDENTIFIER.cs" Link="Common\Interop\Windows\Crypt32\Interop.CRYPT_ALGORITHM_IDENTIFIER.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CRYPT_ATTRIBUTE.cs" Link="Common\Interop\Windows\Crypt32\Interop.CRYPT_ATTRIBUTE.cs" /> <Compile 
Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CRYPT_ATTRIBUTE_TYPE_VALUE.cs" Link="Common\Interop\Windows\Crypt32\Interop.CRYPT_ATTRIBUTE_TYPE_VALUE.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CRYPT_ATTRIBUTES.cs" Link="Common\Interop\Windows\Crypt32\Interop.CRYPT_ATTRIBUTES.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CRYPT_BIT_BLOB.cs" Link="Common\Interop\Windows\Crypt32\Interop.CRYPT_BIT_BLOB.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CRYPT_KEY_PROV_INFO.cs" Link="Common\Interop\Windows\Crypt32\Interop.CRYPT_KEY_PROV_INFO.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CRYPT_RC2_CBC_PARAMETERS.cs" Link="Common\Interop\Windows\Crypt32\Interop.CRYPT_RC2_CBC_PARAMETERS.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptAcquireCertificatePrivateKey.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptAcquireCertificatePrivateKey.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptAcquireCertificatePrivateKeyFlags.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptAcquireCertificatePrivateKeyFlags.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptDecodeObject.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptDecodeObject.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptDecodeObjectStructType.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptDecodeObjectStructType.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptEncodeDecodeWrappers.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptEncodeDecodeWrappers.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptEncodeObject.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptEncodeObject.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptKeySpec.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptKeySpec.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptMsgClose.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptMsgClose.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptMsgControl.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptMsgControl.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptMsgGetParam.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptMsgGetParam.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptMsgOpenToDecode.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptMsgOpenToDecode.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptMsgOpenToEncode.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptMsgOpenToEncode.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptMsgParamType.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptMsgParamType.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptMsgType.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptMsgType.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptMsgUpdate.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptMsgUpdate.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.CryptRc2Version.cs" Link="Common\Interop\Windows\Crypt32\Interop.CryptRc2Version.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.DATA_BLOB.cs" Link="Common\Interop\Windows\Crypt32\Interop.DATA_BLOB.cs" /> <Compile 
Include="$(CommonPath)Interop\Windows\Crypt32\Interop.ErrorCode.cs" Link="Common\Interop\Windows\Crypt32\Interop.ErrorCode.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.FindOidInfo.cs" Link="Common\Interop\Windows\Crypt32\Interop.FindOidInfo.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.MsgControlType.cs" Link="Common\Interop\Windows\Crypt32\Interop.MsgControlType.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Crypt32\Interop.MsgEncodingType.cs" Link="Common\Interop\Windows\Crypt32\Interop.MsgEncodingType.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Kernel32\Interop.FormatMessage.cs" Link="Common\Interop\Windows\kernel32\Interop.FormatMessage.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Kernel32\Interop.Heap.cs" Link="Common\Interop\Windows\kernel32\Interop.Heap.cs" /> <Compile Include="$(CommonPath)Interop\Windows\NCrypt\Interop.ErrorCode.cs" Link="Common\Interop\Windows\NCrypt\Interop.ErrorCode.cs" /> <Compile Include="$(CommonPath)Interop\Windows\NCrypt\Interop.Properties.cs" Link="Common\Interop\Windows\NCrypt\Interop.Properties.cs" /> <Compile Include="$(CommonPath)Interop\Windows\NCrypt\Interop.NCryptFreeObject.cs" Link="Common\Interop\Windows\NCrypt\Interop.NCryptFreeObject.cs" /> <Compile Include="$(CommonPath)Interop\Windows\Interop.Libraries.cs" Link="Common\Interop\Windows\Interop.Libraries.cs" /> <!-- Common types (platform: Windows) --> <Compile Include="$(CommonPath)System\Security\Cryptography\CryptoThrowHelper.Windows.cs" Link="Common\System\Security\Cryptography\CryptoThrowHelper.Windows.cs" /> <Compile Include="$(CommonPath)Microsoft\Win32\SafeHandles\SafeHandleCache.cs" Link="Common\Microsoft\Win32\SafeHandles\SafeHandleCache.cs" /> </ItemGroup> <!-- Internal types (platform: AnyOS) --> <ItemGroup Condition="'$(TargetPlatformIdentifier)' != 'windows' and '$(IsPartialFacadeAssembly)' != 'true'"> <Compile Include="Internal\Cryptography\Pal\AnyOS\PkcsPal.AnyOS.cs" /> </ItemGroup> <ItemGroup Condition="'$(IsPartialFacadeAssembly)' != 'true'"> <Compile Include="$(CommonPath)System\Obsoletions.cs" Link="Common\System\Obsoletions.cs" /> <Compile Include="$(CommonPath)System\Memory\PointerMemoryManager.cs" Link="Common\System\Memory\PointerMemoryManager.cs" /> <Compile Include="$(CommonPath)System\Security\Cryptography\CryptoPool.cs" Link="Common\System\Security\Cryptography\CryptoPool.cs" /> <Compile Include="$(CommonPath)System\Security\Cryptography\Oids.cs" Link="Common\System\Security\Cryptography\Oids.cs" /> <Compile Include="$(CommonPath)System\Security\Cryptography\Oids.Shared.cs" Link="Common\System\Security\Cryptography\Oids.Shared.cs" /> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\AlgorithmIdentifierAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\AlgorithmIdentifierAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\AlgorithmIdentifierAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\AlgorithmIdentifierAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\AlgorithmIdentifierAsn.xml</DependentUpon> </Compile> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\AlgorithmIdentifierAsn.manual.cs"> <Link>Common\System\Security\Cryptography\Asn1\AlgorithmIdentifierAsn.manual.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\AlgorithmIdentifierAsn.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\AttributeAsn.xml"> 
<Link>Common\System\Security\Cryptography\Asn1\AttributeAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\AttributeAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\AttributeAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\AttributeAsn.xml</DependentUpon> </Compile> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\AttributeAsn.manual.cs"> <Link>Common\System\Security\Cryptography\Asn1\AttributeAsn.manual.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\AttributeAsn.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\EncryptedPrivateKeyInfoAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\EncryptedPrivateKeyInfoAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\EncryptedPrivateKeyInfoAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\EncryptedPrivateKeyInfoAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\EncryptedPrivateKeyInfoAsn.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\DirectoryStringAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\DirectoryStringAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\DirectoryStringAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\DirectoryStringAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\DirectoryStringAsn.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\EdiPartyNameAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\EdiPartyNameAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\EdiPartyNameAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\EdiPartyNameAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\EdiPartyNameAsn.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\GeneralNameAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\GeneralNameAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\GeneralNameAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\GeneralNameAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\GeneralNameAsn.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\OtherNameAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\OtherNameAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\OtherNameAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\OtherNameAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\OtherNameAsn.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\PBEParameter.xml"> <Link>Common\System\Security\Cryptography\Asn1\PBEParameter.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\PBEParameter.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\PBEParameter.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\PBEParameter.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\PBES2Params.xml"> <Link>Common\System\Security\Cryptography\Asn1\PBES2Params.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\PBES2Params.xml.cs"> 
<Link>Common\System\Security\Cryptography\Asn1\PBES2Params.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\PBES2Params.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\Pbkdf2Params.xml"> <Link>Common\System\Security\Cryptography\Asn1\Pbkdf2Params.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\Pbkdf2Params.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\Pbkdf2Params.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\Pbkdf2Params.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\Pbkdf2SaltChoice.xml"> <Link>Common\System\Security\Cryptography\Asn1\Pbkdf2SaltChoice.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\Pbkdf2SaltChoice.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\Pbkdf2SaltChoice.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\Pbkdf2SaltChoice.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\PssParamsAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\PssParamsAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\PssParamsAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\PssParamsAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\PssParamsAsn.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\OaepParamsAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\OaepParamsAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\OaepParamsAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\OaepParamsAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\OaepParamsAsn.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\PrivateKeyInfoAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\PrivateKeyInfoAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\PrivateKeyInfoAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\PrivateKeyInfoAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\PrivateKeyInfoAsn.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\Rc2CbcParameters.xml"> <Link>Common\System\Security\Cryptography\Asn1\Rc2CbcParameters.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\Rc2CbcParameters.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\Rc2CbcParameters.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\Rc2CbcParameters.xml</DependentUpon> </Compile> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\Rc2CbcParameters.manual.cs"> <Link>Common\System\Security\Cryptography\Asn1\Rc2CbcParameters.manual.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\Rc2CbcParameters.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\SubjectPublicKeyInfoAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\SubjectPublicKeyInfoAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\SubjectPublicKeyInfoAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\SubjectPublicKeyInfoAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\SubjectPublicKeyInfoAsn.xml</DependentUpon> </Compile> 
<AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\X509ExtensionAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\X509ExtensionAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\X509ExtensionAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\X509ExtensionAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\X509ExtensionAsn.xml</DependentUpon> </Compile> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\X509ExtensionAsn.manual.cs"> <Link>Common\System\Security\Cryptography\Asn1\X509ExtensionAsn.manual.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\X509ExtensionAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\CadesIssuerSerial.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\CadesIssuerSerial.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\CadesIssuerSerial.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\CertificateChoiceAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\CertificateChoiceAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\CertificateChoiceAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\EncapsulatedContentInfoAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\EncapsulatedContentInfoAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\EncapsulatedContentInfoAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\EssCertId.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\EssCertId.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\EssCertId.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\EssCertIdV2.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\EssCertIdV2.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\EssCertIdV2.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\IssuerAndSerialNumberAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\IssuerAndSerialNumberAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\IssuerAndSerialNumberAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\MessageImprint.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\MessageImprint.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\MessageImprint.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\OtherCertificateFormat.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\OtherCertificateFormat.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\OtherCertificateFormat.xml</DependentUpon> </Compile> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\PkiFailureInfo.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\PkiStatus.cs" /> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\PkiStatusInfo.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\PkiStatusInfo.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\PkiStatusInfo.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\PolicyInformation.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\PolicyInformation.xml.cs"> 
<DependentUpon>System\Security\Cryptography\Pkcs\Asn1\PolicyInformation.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\PolicyQualifierInfo.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\PolicyQualifierInfo.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\PolicyQualifierInfo.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\Rfc3161Accuracy.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\Rfc3161Accuracy.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\Rfc3161Accuracy.xml</DependentUpon> </Compile> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\Rfc3161Accuracy.manual.cs" /> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\Rfc3161TimeStampReq.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\Rfc3161TimeStampReq.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\Rfc3161TimeStampReq.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\Rfc3161TimeStampResp.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\Rfc3161TimeStampResp.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\Rfc3161TimeStampResp.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\Rfc3161TstInfo.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\Rfc3161TstInfo.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\Rfc3161TstInfo.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\SignedAttributesSet.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\SignedAttributesSet.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\SignedAttributesSet.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\SignedDataAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\SignedDataAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\SignedDataAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\SignerIdentifierAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\SignerIdentifierAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\SignerIdentifierAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\SignerInfoAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\SignerInfoAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\SignerInfoAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\SigningCertificateAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\SigningCertificateAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\SignerInfoAsn.xml</DependentUpon> </Compile> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\SigningCertificateV2Asn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\SigningCertificateV2Asn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\SigningCertificateV2Asn.xml</DependentUpon> </Compile> <Compile Include="System\Security\Cryptography\Pkcs\CmsSignature.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\CmsSignature.ECDsa.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\CmsSignature.RSA.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\CmsSigner.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\SignedCms.cs" /> <Compile 
Include="System\Security\Cryptography\Pkcs\SignedCms.CtorOverloads.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\SignerInfo.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\SignerInfoCollection.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\SignerInfoEnumerator.cs" /> </ItemGroup> <ItemGroup Condition="$([MSBuild]::IsTargetFrameworkCompatible('$(TargetFramework)', 'netstandard2.1'))"> <Compile Include="$(CommonPath)System\Security\Cryptography\KeyFormatHelper.cs" Link="Common\System\Security\Cryptography\KeyFormatHelper.cs" /> <Compile Include="$(CommonPath)System\Security\Cryptography\KeyFormatHelper.Encrypted.cs" Link="Common\System\Security\Cryptography\KeyFormatHelper.Encrypted.cs" /> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\DigestInfoAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\DigestInfoAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\DigestInfoAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\DigestInfoAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\DigestInfoAsn.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\Pkcs12\CertBagAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\Pkcs12\CertBagAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\Pkcs12\CertBagAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\Pkcs12\CertBagAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\Pkcs12\CertBagAsn.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\Pkcs12\MacData.xml"> <Link>Common\System\Security\Cryptography\Asn1\Pkcs12\MacData.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\Pkcs12\MacData.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\Pkcs12\MacData.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\Pkcs12\MacData.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\Pkcs12\PfxAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\Pkcs12\PfxAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\Pkcs12\PfxAsn.manual.cs"> <Link>Common\System\Security\Cryptography\Asn1\Pkcs12\PfxAsn.manual.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\Pkcs12\PfxAsn.xml</DependentUpon> </Compile> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\Pkcs12\PfxAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\Pkcs12\PfxAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\Pkcs12\PfxAsn.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\Pkcs12\SafeBagAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\Pkcs12\SafeBagAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\Pkcs12\SafeBagAsn.xml.cs"> <Link>Common\System\Security\Cryptography\Asn1\Pkcs12\SafeBagAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\Pkcs12\SafeBagAsn.xml</DependentUpon> </Compile> <AsnXml Include="$(CommonPath)System\Security\Cryptography\Asn1\Pkcs7\EncryptedDataAsn.xml"> <Link>Common\System\Security\Cryptography\Asn1\Pkcs7\EncryptedDataAsn.xml</Link> </AsnXml> <Compile Include="$(CommonPath)System\Security\Cryptography\Asn1\Pkcs7\EncryptedDataAsn.xml.cs"> 
<Link>Common\System\Security\Cryptography\Asn1\Pkcs7\EncryptedDataAsn.xml.cs</Link> <DependentUpon>Common\System\Security\Cryptography\Asn1\Pkcs7\EncryptedDataAsn.xml</DependentUpon> </Compile> <Compile Include="$(CommonPath)System\Security\Cryptography\PasswordBasedEncryption.cs" Link="Common\System\Security\Cryptography\PasswordBasedEncryption.cs" /> <Compile Include="$(CommonPath)System\Security\Cryptography\Pkcs12Kdf.cs" Link="Common\System\Security\Cryptography\Pkcs12Kdf.cs" /> <AsnXml Include="System\Security\Cryptography\Pkcs\Asn1\SecretBagAsn.xml" /> <Compile Include="System\Security\Cryptography\Pkcs\Asn1\SecretBagAsn.xml.cs"> <DependentUpon>System\Security\Cryptography\Pkcs\Asn1\SecretBagAsn.xml</DependentUpon> </Compile> <Compile Include="System\Security\Cryptography\Pkcs\CmsSignature.DSA.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs12Builder.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs12CertBag.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs12ConfidentialityMode.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs12Info.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs12IntegrityMode.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs12KeyBag.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs12SafeBag.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs12SafeContents.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs12SafeContentsBag.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs12SecretBag.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs12ShroudedKeyBag.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs8PrivateKeyInfo.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Pkcs9LocalKeyId.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Rfc3161RequestResponseStatus.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Rfc3161TimestampRequest.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Rfc3161TimestampToken.cs" /> <Compile Include="System\Security\Cryptography\Pkcs\Rfc3161TimestampTokenInfo.cs" /> </ItemGroup> <ItemGroup> <None Include="@(AsnXml)" /> </ItemGroup> <ItemGroup Condition="'$(IsPartialFacadeAssembly)' != 'true'"> <ProjectReference Include="$(LibrariesProjectRoot)System.Formats.Asn1\src\System.Formats.Asn1.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetFrameworkIdentifier)' == '.NETCoreApp'"> <Reference Include="System.Buffers" /> <Reference Include="System.Collections" /> <Reference Include="System.Collections.Concurrent" /> <Reference Include="System.Collections.NonGeneric" /> <Reference Include="System.Diagnostics.Debug" /> <Reference Include="System.Diagnostics.Tools" /> <Reference Include="System.Linq" /> <Reference Include="System.Memory" /> <Reference Include="System.Resources.ResourceManager" /> <Reference Include="System.Runtime" /> <Reference Include="System.Runtime.CompilerServices.Unsafe" /> <Reference Include="System.Runtime.Extensions" /> <Reference Include="System.Runtime.InteropServices" /> <Reference Include="System.Runtime.InteropServices.RuntimeInformation" /> <Reference Include="System.Runtime.Numerics" /> <Reference Include="System.Security.Cryptography.Algorithms" /> <Reference Include="System.Security.Cryptography.Cng" /> <Reference Include="System.Security.Cryptography.Csp" /> <Reference Include="System.Security.Cryptography.Encoding" /> <Reference Include="System.Security.Cryptography.Primitives" /> <Reference 
Include="System.Security.Cryptography.X509Certificates" /> <Reference Include="System.Text.Encoding.Extensions" /> <Reference Include="System.Threading" /> </ItemGroup> <ItemGroup Condition="$([MSBuild]::IsTargetFrameworkCompatible('$(TargetFramework)', 'net7.0'))"> <Reference Include="System.Security.Cryptography" /> </ItemGroup> <ItemGroup Condition="'$(TargetFrameworkIdentifier)' == '.NETStandard'"> <PackageReference Include="System.Security.Cryptography.Cng" Version="$(SystemSecurityCryptographyCngVersion)" /> </ItemGroup> <ItemGroup Condition="$(TargetFramework.StartsWith('netstandard2.0'))"> <PackageReference Include="System.Buffers" Version="$(SystemBuffersVersion)" /> <PackageReference Include="System.Memory" Version="$(SystemMemoryVersion)" /> </ItemGroup> <ItemGroup Condition="'$(TargetFrameworkIdentifier)' == '.NETFramework'"> <Reference Include="System.Security" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
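The change described by this title and description removes the float-to-native-int conversion opcodes from the back ends and lowers them in the front end instead; since a native int is 32 or 64 bits wide depending on the target, the existing I4/I8 conversions cover it. A rough, hypothetical C# sketch of that equivalence (illustration only, not Mono JIT code; the method name is made up):

using System;

class FconvLoweringSketch
{
    // Hypothetical helper: converts a double to a native-sized integer.
    // On a 32-bit target this is effectively a conversion to int (I4),
    // on a 64-bit target a conversion to long (I8).
    static nint ToNativeInt(double value)
    {
        return IntPtr.Size == 4 ? (nint)(int)value : (nint)(long)value;
    }

    static void Main()
    {
        Console.WriteLine(ToNativeInt(3.7));   // prints 3
        Console.WriteLine(ToNativeInt(-2.9));  // prints -2
    }
}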
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/tests/JIT/jit64/gc/misc/struct5_4.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // using System; struct Pad { #pragma warning disable 0414 public double d1; public double d2; public double d3; public double d4; public double d5; public double d6; public double d7; public double d8; public double d9; public double d10; public double d11; public double d12; public double d13; public double d14; public double d15; public double d16; public double d17; public double d18; public double d19; public double d20; public double d21; public double d22; public double d23; public double d24; public double d25; public double d26; public double d27; public double d28; #pragma warning restore 0414 } struct S { #pragma warning disable 0414 public String str2; #pragma warning restore 0414 public String str; public Pad pad; public S(String s) { str = s; str2 = s + str; pad.d1 = pad.d2 = pad.d3 = pad.d4 = pad.d5 = pad.d6 = pad.d7 = pad.d8 = pad.d9 = pad.d10 = pad.d11 = pad.d12 = pad.d13 = pad.d14 = pad.d15 = pad.d16 = pad.d17 = pad.d18 = pad.d19 = pad.d20 = pad.d21 = pad.d22 = pad.d23 = pad.d24 = pad.d25 = pad.d26 = pad.d27 = pad.d28 = 3.3; } } class Test_struct5_4 { public static void c(S s1, S s2, S s3, S s4) { Console.WriteLine(s1.str + s2.str + s3.str + s4.str); } public static int Main() { S sM = new S("test"); S sM2 = new S("test2"); S sM3 = new S("test3"); S sM4 = new S("test4"); c(sM, sM2, sM3, sM4); return 100; } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // using System; struct Pad { #pragma warning disable 0414 public double d1; public double d2; public double d3; public double d4; public double d5; public double d6; public double d7; public double d8; public double d9; public double d10; public double d11; public double d12; public double d13; public double d14; public double d15; public double d16; public double d17; public double d18; public double d19; public double d20; public double d21; public double d22; public double d23; public double d24; public double d25; public double d26; public double d27; public double d28; #pragma warning restore 0414 } struct S { #pragma warning disable 0414 public String str2; #pragma warning restore 0414 public String str; public Pad pad; public S(String s) { str = s; str2 = s + str; pad.d1 = pad.d2 = pad.d3 = pad.d4 = pad.d5 = pad.d6 = pad.d7 = pad.d8 = pad.d9 = pad.d10 = pad.d11 = pad.d12 = pad.d13 = pad.d14 = pad.d15 = pad.d16 = pad.d17 = pad.d18 = pad.d19 = pad.d20 = pad.d21 = pad.d22 = pad.d23 = pad.d24 = pad.d25 = pad.d26 = pad.d27 = pad.d28 = 3.3; } } class Test_struct5_4 { public static void c(S s1, S s2, S s3, S s4) { Console.WriteLine(s1.str + s2.str + s3.str + s4.str); } public static int Main() { S sM = new S("test"); S sM2 = new S("test2"); S sM3 = new S("test3"); S sM4 = new S("test4"); c(sM, sM2, sM3, sM4); return 100; } }
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/CspKeyContainerInfo.Windows.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Runtime.Versioning; namespace System.Security.Cryptography { [SupportedOSPlatform("windows")] public sealed class CspKeyContainerInfo { private readonly CspParameters _parameters; private readonly bool _randomKeyContainer; //Public Constructor will call internal constructor. public CspKeyContainerInfo(CspParameters parameters) : this(parameters, false) { } /// <summary> ///Internal constructor for creating the CspKeyContainerInfo object /// </summary> /// <param name="parameters">CSP parameters</param> /// <param name="randomKeyContainer">Is it random container</param> internal CspKeyContainerInfo(CspParameters parameters, bool randomKeyContainer) { _parameters = new CspParameters(parameters); if (_parameters.KeyNumber == -1) { if (_parameters.ProviderType == (int)CapiHelper.ProviderType.PROV_RSA_FULL || _parameters.ProviderType == (int)CapiHelper.ProviderType.PROV_RSA_AES) { _parameters.KeyNumber = (int)KeyNumber.Exchange; } else if (_parameters.ProviderType == (int)CapiHelper.ProviderType.PROV_DSS_DH) { _parameters.KeyNumber = (int)KeyNumber.Signature; } } _randomKeyContainer = randomKeyContainer; } /// <summary> /// Check the key is accessible /// </summary> public bool Accessible { get { object? retVal = ReadKeyParameterSilent(CapiHelper.ClrPropertyId.CLR_ACCESSIBLE, throwOnNotFound: false); if (retVal == null) { // The key wasn't found, so consider it to be not accessible. return false; } return (bool)retVal; } } /// <summary> /// Check the key is exportable /// </summary> public bool Exportable { get { // Assume hardware keys are not exportable. if (HardwareDevice) { return false; } return (bool)ReadKeyParameterSilent(CapiHelper.ClrPropertyId.CLR_EXPORTABLE)!; } } /// <summary> /// Check if device with key is HW device /// </summary> public bool HardwareDevice { get { return (bool)ReadDeviceParameterVerifyContext(CapiHelper.ClrPropertyId.CLR_HARDWARE); } } /// <summary> /// Get Key container Name /// </summary> public string? KeyContainerName { get { return _parameters.KeyContainerName; } } /// <summary> /// Get the key number /// </summary> public KeyNumber KeyNumber { get { return (KeyNumber)_parameters.KeyNumber; } } /// <summary> /// Check if machine key store is in flag or not /// </summary> public bool MachineKeyStore { get { return CapiHelper.IsFlagBitSet((uint)_parameters.Flags, (uint)CspProviderFlags.UseMachineKeyStore); } } /// <summary> /// Check if key is protected /// </summary> public bool Protected { get { // Assume hardware keys are protected. if (HardwareDevice) { return true; } return (bool)ReadKeyParameterSilent(CapiHelper.ClrPropertyId.CLR_PROTECTED)!; } } /// <summary> /// Gets the provider name /// </summary> public string? 
ProviderName { get { return _parameters.ProviderName; } } /// <summary> /// Gets the provider type /// </summary> public int ProviderType { get { return _parameters.ProviderType; } } /// <summary> /// Check if key container is randomly generated /// </summary> public bool RandomlyGenerated { get { return _randomKeyContainer; } } /// <summary> /// Check if container is removable /// </summary> public bool Removable { get { return (bool)ReadDeviceParameterVerifyContext(CapiHelper.ClrPropertyId.CLR_REMOVABLE); } } /// <summary> /// Get the container name /// </summary> public string UniqueKeyContainerName { get { return (string)ReadKeyParameterSilent(CapiHelper.ClrPropertyId.CLR_UNIQUE_CONTAINER)!; } } /// <summary> /// Read a parameter from the current key using CRYPT_SILENT, to avoid any potential UI prompts. /// </summary> private object? ReadKeyParameterSilent(CapiHelper.ClrPropertyId keyParam, bool throwOnNotFound = true) { const uint SilentFlags = (uint)Interop.Advapi32.CryptAcquireContextFlags.CRYPT_SILENT; SafeProvHandle safeProvHandle; int hr = CapiHelper.OpenCSP(_parameters, SilentFlags, out safeProvHandle); using (safeProvHandle) { if (hr != CapiHelper.S_OK) { if (throwOnNotFound) { throw new CryptographicException(SR.Cryptography_CSP_NotFound); } return null; } object retVal = CapiHelper.GetProviderParameter(safeProvHandle, _parameters.KeyNumber, keyParam); return retVal; } } /// <summary> /// Read a parameter using VERIFY_CONTEXT to read from the device being targeted by _parameters /// </summary> private object ReadDeviceParameterVerifyContext(CapiHelper.ClrPropertyId keyParam) { CspParameters parameters = new CspParameters(_parameters); // We're asking questions of the device container, the only flag that makes sense is Machine vs User. parameters.Flags &= CspProviderFlags.UseMachineKeyStore; // In order to ask about the device, instead of a key, we need to ensure that no key is named. parameters.KeyContainerName = null; const uint OpenDeviceFlags = (uint)Interop.Advapi32.CryptAcquireContextFlags.CRYPT_VERIFYCONTEXT; SafeProvHandle safeProvHandle; int hr = CapiHelper.OpenCSP(parameters, OpenDeviceFlags, out safeProvHandle); using (safeProvHandle) { if (hr != CapiHelper.S_OK) { throw new CryptographicException(SR.Cryptography_CSP_NotFound); } object retVal = CapiHelper.GetProviderParameter(safeProvHandle, parameters.KeyNumber, keyParam); return retVal; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Runtime.Versioning; namespace System.Security.Cryptography { [SupportedOSPlatform("windows")] public sealed class CspKeyContainerInfo { private readonly CspParameters _parameters; private readonly bool _randomKeyContainer; //Public Constructor will call internal constructor. public CspKeyContainerInfo(CspParameters parameters) : this(parameters, false) { } /// <summary> ///Internal constructor for creating the CspKeyContainerInfo object /// </summary> /// <param name="parameters">CSP parameters</param> /// <param name="randomKeyContainer">Is it random container</param> internal CspKeyContainerInfo(CspParameters parameters, bool randomKeyContainer) { _parameters = new CspParameters(parameters); if (_parameters.KeyNumber == -1) { if (_parameters.ProviderType == (int)CapiHelper.ProviderType.PROV_RSA_FULL || _parameters.ProviderType == (int)CapiHelper.ProviderType.PROV_RSA_AES) { _parameters.KeyNumber = (int)KeyNumber.Exchange; } else if (_parameters.ProviderType == (int)CapiHelper.ProviderType.PROV_DSS_DH) { _parameters.KeyNumber = (int)KeyNumber.Signature; } } _randomKeyContainer = randomKeyContainer; } /// <summary> /// Check the key is accessible /// </summary> public bool Accessible { get { object? retVal = ReadKeyParameterSilent(CapiHelper.ClrPropertyId.CLR_ACCESSIBLE, throwOnNotFound: false); if (retVal == null) { // The key wasn't found, so consider it to be not accessible. return false; } return (bool)retVal; } } /// <summary> /// Check the key is exportable /// </summary> public bool Exportable { get { // Assume hardware keys are not exportable. if (HardwareDevice) { return false; } return (bool)ReadKeyParameterSilent(CapiHelper.ClrPropertyId.CLR_EXPORTABLE)!; } } /// <summary> /// Check if device with key is HW device /// </summary> public bool HardwareDevice { get { return (bool)ReadDeviceParameterVerifyContext(CapiHelper.ClrPropertyId.CLR_HARDWARE); } } /// <summary> /// Get Key container Name /// </summary> public string? KeyContainerName { get { return _parameters.KeyContainerName; } } /// <summary> /// Get the key number /// </summary> public KeyNumber KeyNumber { get { return (KeyNumber)_parameters.KeyNumber; } } /// <summary> /// Check if machine key store is in flag or not /// </summary> public bool MachineKeyStore { get { return CapiHelper.IsFlagBitSet((uint)_parameters.Flags, (uint)CspProviderFlags.UseMachineKeyStore); } } /// <summary> /// Check if key is protected /// </summary> public bool Protected { get { // Assume hardware keys are protected. if (HardwareDevice) { return true; } return (bool)ReadKeyParameterSilent(CapiHelper.ClrPropertyId.CLR_PROTECTED)!; } } /// <summary> /// Gets the provider name /// </summary> public string? 
ProviderName { get { return _parameters.ProviderName; } } /// <summary> /// Gets the provider type /// </summary> public int ProviderType { get { return _parameters.ProviderType; } } /// <summary> /// Check if key container is randomly generated /// </summary> public bool RandomlyGenerated { get { return _randomKeyContainer; } } /// <summary> /// Check if container is removable /// </summary> public bool Removable { get { return (bool)ReadDeviceParameterVerifyContext(CapiHelper.ClrPropertyId.CLR_REMOVABLE); } } /// <summary> /// Get the container name /// </summary> public string UniqueKeyContainerName { get { return (string)ReadKeyParameterSilent(CapiHelper.ClrPropertyId.CLR_UNIQUE_CONTAINER)!; } } /// <summary> /// Read a parameter from the current key using CRYPT_SILENT, to avoid any potential UI prompts. /// </summary> private object? ReadKeyParameterSilent(CapiHelper.ClrPropertyId keyParam, bool throwOnNotFound = true) { const uint SilentFlags = (uint)Interop.Advapi32.CryptAcquireContextFlags.CRYPT_SILENT; SafeProvHandle safeProvHandle; int hr = CapiHelper.OpenCSP(_parameters, SilentFlags, out safeProvHandle); using (safeProvHandle) { if (hr != CapiHelper.S_OK) { if (throwOnNotFound) { throw new CryptographicException(SR.Cryptography_CSP_NotFound); } return null; } object retVal = CapiHelper.GetProviderParameter(safeProvHandle, _parameters.KeyNumber, keyParam); return retVal; } } /// <summary> /// Read a parameter using VERIFY_CONTEXT to read from the device being targeted by _parameters /// </summary> private object ReadDeviceParameterVerifyContext(CapiHelper.ClrPropertyId keyParam) { CspParameters parameters = new CspParameters(_parameters); // We're asking questions of the device container, the only flag that makes sense is Machine vs User. parameters.Flags &= CspProviderFlags.UseMachineKeyStore; // In order to ask about the device, instead of a key, we need to ensure that no key is named. parameters.KeyContainerName = null; const uint OpenDeviceFlags = (uint)Interop.Advapi32.CryptAcquireContextFlags.CRYPT_VERIFYCONTEXT; SafeProvHandle safeProvHandle; int hr = CapiHelper.OpenCSP(parameters, OpenDeviceFlags, out safeProvHandle); using (safeProvHandle) { if (hr != CapiHelper.S_OK) { throw new CryptographicException(SR.Cryptography_CSP_NotFound); } object retVal = CapiHelper.GetProviderParameter(safeProvHandle, parameters.KeyNumber, keyParam); return retVal; } } } }
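As a usage sketch of the class above (assumptions: a Windows machine with a CAPI provider available, and a made-up container name), CspKeyContainerInfo is normally obtained from an RSACryptoServiceProvider and then queried through the properties implemented here:

using System;
using System.Security.Cryptography;

class CspKeyContainerInfoSketch
{
    static void Main()
    {
        // "DemoContainer" is a hypothetical key container name.
        var parameters = new CspParameters { KeyContainerName = "DemoContainer" };

        using (var rsa = new RSACryptoServiceProvider(parameters))
        {
            CspKeyContainerInfo info = rsa.CspKeyContainerInfo;

            Console.WriteLine($"Accessible:   {info.Accessible}");
            Console.WriteLine($"Exportable:   {info.Exportable}");
            Console.WriteLine($"MachineStore: {info.MachineKeyStore}");
            Console.WriteLine($"Unique name:  {info.UniqueKeyContainerName}");
        }
    }
}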
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/libraries/System.ComponentModel.Annotations/tests/System/ComponentModel/DataAnnotations/CompareAttributeTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Reflection; using Xunit; namespace System.ComponentModel.DataAnnotations.Tests { public class CompareAttributeTests : ValidationAttributeTestBase { protected override IEnumerable<TestCase> ValidValues() => new TestCase[] { new TestCase(new CompareAttribute("CompareProperty"), "test", new ValidationContext(new CompareObject("test"))), new TestCase(new DerivedCompareAttribute("CompareProperty"), "a", new ValidationContext(new CompareObject("b"))) }; private static ValidationContext s_context = new ValidationContext(new CompareObject("a")) { DisplayName = "CurrentProperty" }; protected override IEnumerable<TestCase> InvalidValues() => new TestCase[] { new TestCase(new CompareAttribute(nameof(CompareObject.CompareProperty)), "b", s_context), new TestCase(new CompareAttribute(nameof(CompareObject.ComparePropertyWithDisplayName)), "b", s_context), new TestCase(new CompareAttribute("NoSuchProperty"), "b", s_context), new TestCase(new CompareAttribute(nameof(CompareObject.CompareProperty)), "b", new ValidationContext(new CompareObjectSubClass("a"))) }; [Fact] public static void Constructor_NullOtherProperty_ThrowsArgumentNullException() { AssertExtensions.Throws<ArgumentNullException>("otherProperty", () => new CompareAttribute(null)); } [Theory] [InlineData("OtherProperty")] [InlineData("")] public static void Constructor(string otherProperty) { CompareAttribute attribute = new CompareAttribute(otherProperty); Assert.Equal(otherProperty, attribute.OtherProperty); Assert.True(attribute.RequiresValidationContext); } [Fact] [SkipOnTargetFramework(~TargetFrameworkMonikers.NetFramework)] public static void Validate_Indexer_ThrowsTargetParameterCountException_Netfx() { CompareAttribute attribute = new CompareAttribute("Item"); Assert.Throws<TargetParameterCountException>(() => attribute.Validate("b", s_context)); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework)] public static void Validate_Indexer_ThrowsArgumentException_Netcoreapp() { CompareAttribute attribute = new CompareAttribute("Item"); AssertExtensions.Throws<ArgumentException>(null, () => attribute.Validate("b", s_context)); } [Fact] public static void Validate_SetOnlyProperty_ThrowsArgumentException() { CompareAttribute attribute = new CompareAttribute(nameof(CompareObject.SetOnlyProperty)); AssertExtensions.Throws<ArgumentException>(null, () => attribute.Validate("b", s_context)); } [Fact] public static void Validate_LowerAndUpperPropertyName_Success() { CompareAttribute attribute = new CompareAttribute(nameof(CompareObject.comparepropertycased)); Assert.NotNull(attribute.GetValidationResult("b", s_context).ErrorMessage); Assert.Equal(ValidationResult.Success, attribute.GetValidationResult(null, s_context)); Assert.Equal(nameof(CompareObject.comparepropertycased), attribute.OtherPropertyDisplayName); } [Fact] [SkipOnTargetFramework(~TargetFrameworkMonikers.NetFramework)] public static void Validate_IncludesMemberName_NetFx() { ValidationContext validationContext = new ValidationContext(new CompareObject("a")); validationContext.MemberName = nameof(CompareObject.CompareProperty); CompareAttribute attribute = new CompareAttribute(nameof(CompareObject.ComparePropertyCased)); ValidationResult validationResult = attribute.GetValidationResult("b", validationContext); Assert.NotNull(validationResult.ErrorMessage); 
Assert.Empty(validationResult.MemberNames); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework)] public static void Validate_IncludesMemberName_Netcoreapp() { ValidationContext validationContext = new ValidationContext(new CompareObject("a")); validationContext.MemberName = nameof(CompareObject.CompareProperty); CompareAttribute attribute = new CompareAttribute(nameof(CompareObject.ComparePropertyCased)); ValidationResult validationResult = attribute.GetValidationResult("b", validationContext); Assert.NotNull(validationResult.ErrorMessage); Assert.Equal(new[] { nameof(CompareObject.CompareProperty) }, validationResult.MemberNames); } [Fact] public static void Validate_PrivateProperty_ThrowsArgumentException() { CompareAttribute attribute = new CompareAttribute("PrivateProperty"); Assert.Throws<ValidationException>(() => attribute.Validate("b", s_context)); } [Fact] public static void Validate_PropertyHasDisplayName_UpdatesFormatErrorMessageToContainDisplayName() { CompareAttribute attribute = new CompareAttribute(nameof(CompareObject.ComparePropertyWithDisplayName)); string oldErrorMessage = attribute.FormatErrorMessage("name"); Assert.DoesNotContain("CustomDisplayName", oldErrorMessage); Assert.Throws<ValidationException>(() => attribute.Validate("test1", new ValidationContext(new CompareObject("test")))); string newErrorMessage = attribute.FormatErrorMessage("name"); Assert.NotEqual(oldErrorMessage, newErrorMessage); Assert.Contains("CustomDisplayName", newErrorMessage); } private class DerivedCompareAttribute : CompareAttribute { public DerivedCompareAttribute(string otherProperty) : base(otherProperty) { } protected override ValidationResult IsValid(object value, ValidationContext context) => ValidationResult.Success; } private class CompareObject { public string CompareProperty { get; set; } [Display(Name = "CustomDisplayName")] public string ComparePropertyWithDisplayName { get; set; } public string this[int index] { get { return "abc"; } set { } } public string SetOnlyProperty { set { } } private string PrivateProperty { get; set; } public string ComparePropertyCased { get; set; } public string comparepropertycased { get; set; } public CompareObject(string otherValue) { CompareProperty = otherValue; ComparePropertyWithDisplayName = otherValue; } } private class CompareObjectSubClass : CompareObject { public CompareObjectSubClass(string otherValue) : base(otherValue) { } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Reflection; using Xunit; namespace System.ComponentModel.DataAnnotations.Tests { public class CompareAttributeTests : ValidationAttributeTestBase { protected override IEnumerable<TestCase> ValidValues() => new TestCase[] { new TestCase(new CompareAttribute("CompareProperty"), "test", new ValidationContext(new CompareObject("test"))), new TestCase(new DerivedCompareAttribute("CompareProperty"), "a", new ValidationContext(new CompareObject("b"))) }; private static ValidationContext s_context = new ValidationContext(new CompareObject("a")) { DisplayName = "CurrentProperty" }; protected override IEnumerable<TestCase> InvalidValues() => new TestCase[] { new TestCase(new CompareAttribute(nameof(CompareObject.CompareProperty)), "b", s_context), new TestCase(new CompareAttribute(nameof(CompareObject.ComparePropertyWithDisplayName)), "b", s_context), new TestCase(new CompareAttribute("NoSuchProperty"), "b", s_context), new TestCase(new CompareAttribute(nameof(CompareObject.CompareProperty)), "b", new ValidationContext(new CompareObjectSubClass("a"))) }; [Fact] public static void Constructor_NullOtherProperty_ThrowsArgumentNullException() { AssertExtensions.Throws<ArgumentNullException>("otherProperty", () => new CompareAttribute(null)); } [Theory] [InlineData("OtherProperty")] [InlineData("")] public static void Constructor(string otherProperty) { CompareAttribute attribute = new CompareAttribute(otherProperty); Assert.Equal(otherProperty, attribute.OtherProperty); Assert.True(attribute.RequiresValidationContext); } [Fact] [SkipOnTargetFramework(~TargetFrameworkMonikers.NetFramework)] public static void Validate_Indexer_ThrowsTargetParameterCountException_Netfx() { CompareAttribute attribute = new CompareAttribute("Item"); Assert.Throws<TargetParameterCountException>(() => attribute.Validate("b", s_context)); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework)] public static void Validate_Indexer_ThrowsArgumentException_Netcoreapp() { CompareAttribute attribute = new CompareAttribute("Item"); AssertExtensions.Throws<ArgumentException>(null, () => attribute.Validate("b", s_context)); } [Fact] public static void Validate_SetOnlyProperty_ThrowsArgumentException() { CompareAttribute attribute = new CompareAttribute(nameof(CompareObject.SetOnlyProperty)); AssertExtensions.Throws<ArgumentException>(null, () => attribute.Validate("b", s_context)); } [Fact] public static void Validate_LowerAndUpperPropertyName_Success() { CompareAttribute attribute = new CompareAttribute(nameof(CompareObject.comparepropertycased)); Assert.NotNull(attribute.GetValidationResult("b", s_context).ErrorMessage); Assert.Equal(ValidationResult.Success, attribute.GetValidationResult(null, s_context)); Assert.Equal(nameof(CompareObject.comparepropertycased), attribute.OtherPropertyDisplayName); } [Fact] [SkipOnTargetFramework(~TargetFrameworkMonikers.NetFramework)] public static void Validate_IncludesMemberName_NetFx() { ValidationContext validationContext = new ValidationContext(new CompareObject("a")); validationContext.MemberName = nameof(CompareObject.CompareProperty); CompareAttribute attribute = new CompareAttribute(nameof(CompareObject.ComparePropertyCased)); ValidationResult validationResult = attribute.GetValidationResult("b", validationContext); Assert.NotNull(validationResult.ErrorMessage); 
Assert.Empty(validationResult.MemberNames); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework)] public static void Validate_IncludesMemberName_Netcoreapp() { ValidationContext validationContext = new ValidationContext(new CompareObject("a")); validationContext.MemberName = nameof(CompareObject.CompareProperty); CompareAttribute attribute = new CompareAttribute(nameof(CompareObject.ComparePropertyCased)); ValidationResult validationResult = attribute.GetValidationResult("b", validationContext); Assert.NotNull(validationResult.ErrorMessage); Assert.Equal(new[] { nameof(CompareObject.CompareProperty) }, validationResult.MemberNames); } [Fact] public static void Validate_PrivateProperty_ThrowsArgumentException() { CompareAttribute attribute = new CompareAttribute("PrivateProperty"); Assert.Throws<ValidationException>(() => attribute.Validate("b", s_context)); } [Fact] public static void Validate_PropertyHasDisplayName_UpdatesFormatErrorMessageToContainDisplayName() { CompareAttribute attribute = new CompareAttribute(nameof(CompareObject.ComparePropertyWithDisplayName)); string oldErrorMessage = attribute.FormatErrorMessage("name"); Assert.DoesNotContain("CustomDisplayName", oldErrorMessage); Assert.Throws<ValidationException>(() => attribute.Validate("test1", new ValidationContext(new CompareObject("test")))); string newErrorMessage = attribute.FormatErrorMessage("name"); Assert.NotEqual(oldErrorMessage, newErrorMessage); Assert.Contains("CustomDisplayName", newErrorMessage); } private class DerivedCompareAttribute : CompareAttribute { public DerivedCompareAttribute(string otherProperty) : base(otherProperty) { } protected override ValidationResult IsValid(object value, ValidationContext context) => ValidationResult.Success; } private class CompareObject { public string CompareProperty { get; set; } [Display(Name = "CustomDisplayName")] public string ComparePropertyWithDisplayName { get; set; } public string this[int index] { get { return "abc"; } set { } } public string SetOnlyProperty { set { } } private string PrivateProperty { get; set; } public string ComparePropertyCased { get; set; } public string comparepropertycased { get; set; } public CompareObject(string otherValue) { CompareProperty = otherValue; ComparePropertyWithDisplayName = otherValue; } } private class CompareObjectSubClass : CompareObject { public CompareObjectSubClass(string otherValue) : base(otherValue) { } } } }
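For context, a minimal sketch of how CompareAttribute is consumed outside the test suite; the SignUpModel type below is hypothetical. The attribute on one property names another property whose value must match, and Validator reports a failure when they differ:

using System;
using System.Collections.Generic;
using System.ComponentModel.DataAnnotations;

class SignUpModel
{
    public string Password { get; set; }

    [Compare(nameof(Password))]
    public string ConfirmPassword { get; set; }
}

class CompareAttributeSketch
{
    static void Main()
    {
        var model = new SignUpModel { Password = "a", ConfirmPassword = "b" };
        var results = new List<ValidationResult>();

        bool valid = Validator.TryValidateObject(
            model, new ValidationContext(model), results, validateAllProperties: true);

        Console.WriteLine(valid);                    // False: the two values differ
        Console.WriteLine(results[0].ErrorMessage);  // default message names both properties
    }
}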
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/tests/JIT/Performance/CodeQuality/Benchstones/BenchI/Ackermann/Ackermann.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // using System; using System.Runtime.CompilerServices; namespace Benchstone.BenchI { public static class Ackermann { #if DEBUG public const int Iterations = 1; #else public const int Iterations = 100000; #endif static int Acker(int m, int n) { if (m == 0) { return n + 1; } else if (n == 0) { return Acker(m - 1, 1); } else { return Acker(m - 1, Acker(m, n - 1)); } } [MethodImpl(MethodImplOptions.NoInlining)] static bool Bench() { int a00 = Acker(0, 0); int a11 = Acker(1, 1); int a22 = Acker(2, 2); int a33 = Acker(3, 3); return (a00 == 1) && (a11 == 3) && (a22 == 7) && (a33 == 61); } static bool TestBase() { bool result = true; for (int i = 0; i < Iterations; i++) { result &= Bench(); } return result; } public static int Main() { bool result = TestBase(); return (result ? 100 : -1); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // using System; using System.Runtime.CompilerServices; namespace Benchstone.BenchI { public static class Ackermann { #if DEBUG public const int Iterations = 1; #else public const int Iterations = 100000; #endif static int Acker(int m, int n) { if (m == 0) { return n + 1; } else if (n == 0) { return Acker(m - 1, 1); } else { return Acker(m - 1, Acker(m, n - 1)); } } [MethodImpl(MethodImplOptions.NoInlining)] static bool Bench() { int a00 = Acker(0, 0); int a11 = Acker(1, 1); int a22 = Acker(2, 2); int a33 = Acker(3, 3); return (a00 == 1) && (a11 == 3) && (a22 == 7) && (a33 == 61); } static bool TestBase() { bool result = true; for (int i = 0; i < Iterations; i++) { result &= Bench(); } return result; } public static int Main() { bool result = TestBase(); return (result ? 100 : -1); } } }
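For reference, the Acker method above is the standard two-argument Ackermann recurrence, written here in math form:

A(m, n) =
\begin{cases}
n + 1 & \text{if } m = 0 \\
A(m - 1,\ 1) & \text{if } m > 0,\ n = 0 \\
A(m - 1,\ A(m,\ n - 1)) & \text{if } m > 0,\ n > 0
\end{cases}

which gives A(0, 0) = 1, A(1, 1) = 3, A(2, 2) = 7, and A(3, 3) = 61, exactly the values asserted in Bench().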
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/ILStreamReader.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using Internal.TypeSystem; using Internal.IL; namespace Internal.Compiler { /// <summary> /// IL Opcode reader in external reader style where the reading is done by trying to read /// various opcodes, and the reader can indicate success or failure of reading a particular opcode /// /// Used by logic which is designed to encode information in il structure, but not used /// to support general compilation of IL. /// </summary> public struct ILStreamReader { private ILReader _reader; private readonly MethodIL _methodIL; public ILStreamReader(MethodIL methodIL) { _methodIL = methodIL; _reader = new ILReader(methodIL.GetILBytes()); } public bool HasNextInstruction { get { return _reader.HasNext; } } public bool TryReadLdtoken(out int token) { if (_reader.PeekILOpcode() != ILOpcode.ldtoken) { token = 0; return false; } _reader.ReadILOpcode(); token = _reader.ReadILToken(); return true; } public int ReadLdtoken() { int result; if (!TryReadLdtoken(out result)) throw new BadImageFormatException(); return result; } public bool TryReadLdtokenAsTypeSystemEntity(out TypeSystemEntity entity) { int token; bool tokenResolved; try { tokenResolved = TryReadLdtoken(out token); entity = tokenResolved ? (TypeSystemEntity)_methodIL.GetObject(token) : null; } catch (TypeSystemException) { tokenResolved = false; entity = null; } return tokenResolved; } public TypeSystemEntity ReadLdtokenAsTypeSystemEntity() { TypeSystemEntity result; if (!TryReadLdtokenAsTypeSystemEntity(out result)) throw new BadImageFormatException(); return result; } public bool TryReadLdcI4(out int value) { ILOpcode opcode = _reader.PeekILOpcode(); if (opcode == ILOpcode.ldc_i4) // ldc.i4 { _reader.ReadILOpcode(); value = unchecked((int)_reader.ReadILUInt32()); return true; } if ((opcode >= ILOpcode.ldc_i4_m1) && (opcode <= ILOpcode.ldc_i4_8)) // ldc.m1 to ldc.i4.8 { _reader.ReadILOpcode(); value = -1 + ((int)opcode) - 0x15; return true; } if (opcode == ILOpcode.ldc_i4_s) // ldc.i4.s { _reader.ReadILOpcode(); value = (int)unchecked((sbyte)_reader.ReadILByte()); return true; } value = 0; return false; } public int ReadLdcI4() { int result; if (!TryReadLdcI4(out result)) throw new BadImageFormatException(); return result; } public bool TryReadRet() { ILOpcode opcode = _reader.PeekILOpcode(); if (opcode == ILOpcode.ret) { _reader.ReadILOpcode(); return true; } return false; } public void ReadRet() { if (!TryReadRet()) throw new BadImageFormatException(); } public bool TryReadPop() { ILOpcode opcode = _reader.PeekILOpcode(); if (opcode == ILOpcode.pop) { _reader.ReadILOpcode(); return true; } return false; } public void ReadPop() { if (!TryReadPop()) throw new BadImageFormatException(); } public bool TryReadLdstr(out string ldstrString) { if (_reader.PeekILOpcode() != ILOpcode.ldstr) { ldstrString = null; return false; } _reader.ReadILOpcode(); int token = _reader.ReadILToken(); ldstrString = (string)_methodIL.GetObject(token); return true; } public string ReadLdstr() { string result; if (!TryReadLdstr(out result)) throw new BadImageFormatException(); return result; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using Internal.TypeSystem; using Internal.IL; namespace Internal.Compiler { /// <summary> /// IL Opcode reader in external reader style where the reading is done by trying to read /// various opcodes, and the reader can indicate success or failure of reading a particular opcode /// /// Used by logic which is designed to encode information in il structure, but not used /// to support general compilation of IL. /// </summary> public struct ILStreamReader { private ILReader _reader; private readonly MethodIL _methodIL; public ILStreamReader(MethodIL methodIL) { _methodIL = methodIL; _reader = new ILReader(methodIL.GetILBytes()); } public bool HasNextInstruction { get { return _reader.HasNext; } } public bool TryReadLdtoken(out int token) { if (_reader.PeekILOpcode() != ILOpcode.ldtoken) { token = 0; return false; } _reader.ReadILOpcode(); token = _reader.ReadILToken(); return true; } public int ReadLdtoken() { int result; if (!TryReadLdtoken(out result)) throw new BadImageFormatException(); return result; } public bool TryReadLdtokenAsTypeSystemEntity(out TypeSystemEntity entity) { int token; bool tokenResolved; try { tokenResolved = TryReadLdtoken(out token); entity = tokenResolved ? (TypeSystemEntity)_methodIL.GetObject(token) : null; } catch (TypeSystemException) { tokenResolved = false; entity = null; } return tokenResolved; } public TypeSystemEntity ReadLdtokenAsTypeSystemEntity() { TypeSystemEntity result; if (!TryReadLdtokenAsTypeSystemEntity(out result)) throw new BadImageFormatException(); return result; } public bool TryReadLdcI4(out int value) { ILOpcode opcode = _reader.PeekILOpcode(); if (opcode == ILOpcode.ldc_i4) // ldc.i4 { _reader.ReadILOpcode(); value = unchecked((int)_reader.ReadILUInt32()); return true; } if ((opcode >= ILOpcode.ldc_i4_m1) && (opcode <= ILOpcode.ldc_i4_8)) // ldc.m1 to ldc.i4.8 { _reader.ReadILOpcode(); value = -1 + ((int)opcode) - 0x15; return true; } if (opcode == ILOpcode.ldc_i4_s) // ldc.i4.s { _reader.ReadILOpcode(); value = (int)unchecked((sbyte)_reader.ReadILByte()); return true; } value = 0; return false; } public int ReadLdcI4() { int result; if (!TryReadLdcI4(out result)) throw new BadImageFormatException(); return result; } public bool TryReadRet() { ILOpcode opcode = _reader.PeekILOpcode(); if (opcode == ILOpcode.ret) { _reader.ReadILOpcode(); return true; } return false; } public void ReadRet() { if (!TryReadRet()) throw new BadImageFormatException(); } public bool TryReadPop() { ILOpcode opcode = _reader.PeekILOpcode(); if (opcode == ILOpcode.pop) { _reader.ReadILOpcode(); return true; } return false; } public void ReadPop() { if (!TryReadPop()) throw new BadImageFormatException(); } public bool TryReadLdstr(out string ldstrString) { if (_reader.PeekILOpcode() != ILOpcode.ldstr) { ldstrString = null; return false; } _reader.ReadILOpcode(); int token = _reader.ReadILToken(); ldstrString = (string)_methodIL.GetObject(token); return true; } public string ReadLdstr() { string result; if (!TryReadLdstr(out result)) throw new BadImageFormatException(); return result; } } }
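A small usage sketch of the reader above, assuming the caller already has a MethodIL from the compiler's type system and expects the hypothetical encoding "ldc.i4 <value>; ret" in the body:

using Internal.Compiler;
using Internal.IL;

static class EncodedConstantReader
{
    // Reads a method body expected to consist of an ldc.i4* opcode followed by ret.
    // Both helpers throw BadImageFormatException when the next opcode does not match.
    public static int Read(MethodIL methodIL)
    {
        var reader = new ILStreamReader(methodIL);
        int value = reader.ReadLdcI4();
        reader.ReadRet();
        return value;
    }
}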
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/libraries/System.ComponentModel.TypeConverter/src/System/ComponentModel/Design/Serialization/RootDesignerSerializerAttribute.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.ComponentModel.Design.Serialization { /// <summary> /// This attribute can be placed on a class to indicate what serialization /// object should be used to serialize the class at design time if it is /// being used as a root object. /// </summary> [AttributeUsage(AttributeTargets.Class | AttributeTargets.Interface, AllowMultiple = true, Inherited = true)] [Obsolete("RootDesignerSerializerAttribute has been deprecated. Use DesignerSerializerAttribute instead. For example, to specify a root designer for CodeDom, use DesignerSerializerAttribute(...,typeof(TypeCodeDomSerializer)) instead.")] public sealed class RootDesignerSerializerAttribute : Attribute { private string? _typeId; /// <summary> /// Creates a new designer serialization attribute. /// </summary> public RootDesignerSerializerAttribute(Type serializerType!!, Type baseSerializerType!!, bool reloadable) { SerializerTypeName = serializerType.AssemblyQualifiedName; SerializerBaseTypeName = baseSerializerType.AssemblyQualifiedName; Reloadable = reloadable; } /// <summary> /// Creates a new designer serialization attribute. /// </summary> public RootDesignerSerializerAttribute(string serializerTypeName, Type baseSerializerType!!, bool reloadable) { SerializerTypeName = serializerTypeName; SerializerBaseTypeName = baseSerializerType.AssemblyQualifiedName; Reloadable = reloadable; } /// <summary> /// Creates a new designer serialization attribute. /// </summary> public RootDesignerSerializerAttribute(string? serializerTypeName, string? baseSerializerTypeName, bool reloadable) { SerializerTypeName = serializerTypeName; SerializerBaseTypeName = baseSerializerTypeName; Reloadable = reloadable; } /// <summary> /// Indicates that this root serializer supports reloading. If false, the design document /// will not automatically perform a reload on behalf of the user. It will be the user's /// responsibility to reload the document themselves. /// </summary> public bool Reloadable { get; } /// <summary> /// Retrieves the fully qualified type name of the serializer. /// </summary> public string? SerializerTypeName { get; } /// <summary> /// Retrieves the fully qualified type name of the serializer base type. /// </summary> public string? SerializerBaseTypeName { get; } /// <summary> /// This defines a unique ID for this attribute type. It is used /// by filtering algorithms to identify two attributes that are /// the same type. For most attributes, this just returns the /// Type instance for the attribute. EditorAttribute overrides /// this to include the type of the editor base type. /// </summary> public override object TypeId { get { if (_typeId == null) { string baseType = SerializerBaseTypeName ?? string.Empty; int comma = baseType.IndexOf(','); if (comma != -1) { baseType = baseType.Substring(0, comma); } _typeId = GetType().FullName + baseType; } return _typeId; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.ComponentModel.Design.Serialization { /// <summary> /// This attribute can be placed on a class to indicate what serialization /// object should be used to serialize the class at design time if it is /// being used as a root object. /// </summary> [AttributeUsage(AttributeTargets.Class | AttributeTargets.Interface, AllowMultiple = true, Inherited = true)] [Obsolete("RootDesignerSerializerAttribute has been deprecated. Use DesignerSerializerAttribute instead. For example, to specify a root designer for CodeDom, use DesignerSerializerAttribute(...,typeof(TypeCodeDomSerializer)) instead.")] public sealed class RootDesignerSerializerAttribute : Attribute { private string? _typeId; /// <summary> /// Creates a new designer serialization attribute. /// </summary> public RootDesignerSerializerAttribute(Type serializerType!!, Type baseSerializerType!!, bool reloadable) { SerializerTypeName = serializerType.AssemblyQualifiedName; SerializerBaseTypeName = baseSerializerType.AssemblyQualifiedName; Reloadable = reloadable; } /// <summary> /// Creates a new designer serialization attribute. /// </summary> public RootDesignerSerializerAttribute(string serializerTypeName, Type baseSerializerType!!, bool reloadable) { SerializerTypeName = serializerTypeName; SerializerBaseTypeName = baseSerializerType.AssemblyQualifiedName; Reloadable = reloadable; } /// <summary> /// Creates a new designer serialization attribute. /// </summary> public RootDesignerSerializerAttribute(string? serializerTypeName, string? baseSerializerTypeName, bool reloadable) { SerializerTypeName = serializerTypeName; SerializerBaseTypeName = baseSerializerTypeName; Reloadable = reloadable; } /// <summary> /// Indicates that this root serializer supports reloading. If false, the design document /// will not automatically perform a reload on behalf of the user. It will be the user's /// responsibility to reload the document themselves. /// </summary> public bool Reloadable { get; } /// <summary> /// Retrieves the fully qualified type name of the serializer. /// </summary> public string? SerializerTypeName { get; } /// <summary> /// Retrieves the fully qualified type name of the serializer base type. /// </summary> public string? SerializerBaseTypeName { get; } /// <summary> /// This defines a unique ID for this attribute type. It is used /// by filtering algorithms to identify two attributes that are /// the same type. For most attributes, this just returns the /// Type instance for the attribute. EditorAttribute overrides /// this to include the type of the editor base type. /// </summary> public override object TypeId { get { if (_typeId == null) { string baseType = SerializerBaseTypeName ?? string.Empty; int comma = baseType.IndexOf(','); if (comma != -1) { baseType = baseType.Substring(0, comma); } _typeId = GetType().FullName + baseType; } return _typeId; } } } }
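A sketch of how this now-obsolete attribute was applied; the serializer type names below are hypothetical, and new code should prefer DesignerSerializerAttribute as the obsoletion message says:

using System.ComponentModel;
using System.ComponentModel.Design.Serialization;

#pragma warning disable CS0618 // RootDesignerSerializerAttribute is obsolete
[RootDesignerSerializer(
    "Acme.Design.MyRootSerializer, Acme.Design",      // hypothetical serializer type name
    "Acme.Design.MyRootSerializerBase, Acme.Design",  // hypothetical base serializer type name
    true)]                                            // reloadable
public class SampleRootComponent : Component
{
}
#pragma warning restore CS0618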
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/mono/wasm/debugger/debugger.sln
Microsoft Visual Studio Solution File, Format Version 12.00 # Visual Studio 15 Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "DebuggerTestSuite", "DebuggerTestSuite\DebuggerTestSuite.csproj", "{FF4CBDBF-EAC9-4531-A0F7-F3B834182862}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "lazy-debugger-test", "tests\lazy-debugger-test\lazy-debugger-test.csproj", "{BB824B8F-3E3B-48AB-86DB-168D15121372}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "debugger-test", "tests\debugger-test\debugger-test.csproj", "{6C3A70F3-12C1-457D-A8D7-1E0E335B1F66}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "BrowserDebugProxy", "BrowserDebugProxy\BrowserDebugProxy.csproj", "{532E9E35-D877-41A6-81F4-505AEA17AA04}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "BrowserDebugHost", "BrowserDebugHost\BrowserDebugHost.csproj", "{E8A5E324-3FCD-4E3A-AC99-0C39037A8C1B}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "lazy-debugger-test-embedded", "tests\lazy-debugger-test-embedded\lazy-debugger-test-embedded.csproj", "{1E2F50AA-DA21-4D71-AF07-F38B68026CAA}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|x64 = Debug|x64 Debug|x86 = Debug|x86 Release|x64 = Release|x64 Release|x86 = Release|x86 EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution {FF4CBDBF-EAC9-4531-A0F7-F3B834182862}.Debug|x64.ActiveCfg = Debug|x64 {FF4CBDBF-EAC9-4531-A0F7-F3B834182862}.Debug|x64.Build.0 = Debug|x64 {FF4CBDBF-EAC9-4531-A0F7-F3B834182862}.Debug|x86.ActiveCfg = Debug|x86 {FF4CBDBF-EAC9-4531-A0F7-F3B834182862}.Debug|x86.Build.0 = Debug|x86 {FF4CBDBF-EAC9-4531-A0F7-F3B834182862}.Release|x64.ActiveCfg = Release|x64 {FF4CBDBF-EAC9-4531-A0F7-F3B834182862}.Release|x64.Build.0 = Release|x64 {FF4CBDBF-EAC9-4531-A0F7-F3B834182862}.Release|x86.ActiveCfg = Release|x86 {FF4CBDBF-EAC9-4531-A0F7-F3B834182862}.Release|x86.Build.0 = Release|x86 {BB824B8F-3E3B-48AB-86DB-168D15121372}.Debug|x64.ActiveCfg = Debug|x64 {BB824B8F-3E3B-48AB-86DB-168D15121372}.Debug|x64.Build.0 = Debug|x64 {BB824B8F-3E3B-48AB-86DB-168D15121372}.Debug|x86.ActiveCfg = Debug|x86 {BB824B8F-3E3B-48AB-86DB-168D15121372}.Debug|x86.Build.0 = Debug|x86 {BB824B8F-3E3B-48AB-86DB-168D15121372}.Release|x64.ActiveCfg = Release|x64 {BB824B8F-3E3B-48AB-86DB-168D15121372}.Release|x64.Build.0 = Release|x64 {BB824B8F-3E3B-48AB-86DB-168D15121372}.Release|x86.ActiveCfg = Release|x86 {BB824B8F-3E3B-48AB-86DB-168D15121372}.Release|x86.Build.0 = Release|x86 {6C3A70F3-12C1-457D-A8D7-1E0E335B1F66}.Debug|x64.ActiveCfg = Debug|x64 {6C3A70F3-12C1-457D-A8D7-1E0E335B1F66}.Debug|x64.Build.0 = Debug|x64 {6C3A70F3-12C1-457D-A8D7-1E0E335B1F66}.Debug|x86.ActiveCfg = Debug|x86 {6C3A70F3-12C1-457D-A8D7-1E0E335B1F66}.Debug|x86.Build.0 = Debug|x86 {6C3A70F3-12C1-457D-A8D7-1E0E335B1F66}.Release|x64.ActiveCfg = Release|x64 {6C3A70F3-12C1-457D-A8D7-1E0E335B1F66}.Release|x64.Build.0 = Release|x64 {6C3A70F3-12C1-457D-A8D7-1E0E335B1F66}.Release|x86.ActiveCfg = Release|x86 {6C3A70F3-12C1-457D-A8D7-1E0E335B1F66}.Release|x86.Build.0 = Release|x86 {532E9E35-D877-41A6-81F4-505AEA17AA04}.Debug|x64.ActiveCfg = Debug|x64 {532E9E35-D877-41A6-81F4-505AEA17AA04}.Debug|x64.Build.0 = Debug|x64 {532E9E35-D877-41A6-81F4-505AEA17AA04}.Debug|x86.ActiveCfg = Debug|x86 {532E9E35-D877-41A6-81F4-505AEA17AA04}.Debug|x86.Build.0 = Debug|x86 {532E9E35-D877-41A6-81F4-505AEA17AA04}.Release|x64.ActiveCfg = Release|x64 {532E9E35-D877-41A6-81F4-505AEA17AA04}.Release|x64.Build.0 = Release|x64 
{532E9E35-D877-41A6-81F4-505AEA17AA04}.Release|x86.ActiveCfg = Release|x86 {532E9E35-D877-41A6-81F4-505AEA17AA04}.Release|x86.Build.0 = Release|x86 {E8A5E324-3FCD-4E3A-AC99-0C39037A8C1B}.Debug|x64.ActiveCfg = Debug|x64 {E8A5E324-3FCD-4E3A-AC99-0C39037A8C1B}.Debug|x64.Build.0 = Debug|x64 {E8A5E324-3FCD-4E3A-AC99-0C39037A8C1B}.Debug|x86.ActiveCfg = Debug|x86 {E8A5E324-3FCD-4E3A-AC99-0C39037A8C1B}.Debug|x86.Build.0 = Debug|x86 {E8A5E324-3FCD-4E3A-AC99-0C39037A8C1B}.Release|x64.ActiveCfg = Release|x64 {E8A5E324-3FCD-4E3A-AC99-0C39037A8C1B}.Release|x64.Build.0 = Release|x64 {E8A5E324-3FCD-4E3A-AC99-0C39037A8C1B}.Release|x86.ActiveCfg = Release|x86 {E8A5E324-3FCD-4E3A-AC99-0C39037A8C1B}.Release|x86.Build.0 = Release|x86 {1E2F50AA-DA21-4D71-AF07-F38B68026CAA}.Debug|x64.ActiveCfg = Debug|x64 {1E2F50AA-DA21-4D71-AF07-F38B68026CAA}.Debug|x64.Build.0 = Debug|x64 {1E2F50AA-DA21-4D71-AF07-F38B68026CAA}.Debug|x86.ActiveCfg = Debug|x86 {1E2F50AA-DA21-4D71-AF07-F38B68026CAA}.Debug|x86.Build.0 = Debug|x86 {1E2F50AA-DA21-4D71-AF07-F38B68026CAA}.Release|x64.ActiveCfg = Release|x64 {1E2F50AA-DA21-4D71-AF07-F38B68026CAA}.Release|x64.Build.0 = Release|x64 {1E2F50AA-DA21-4D71-AF07-F38B68026CAA}.Release|x86.ActiveCfg = Release|x86 {1E2F50AA-DA21-4D71-AF07-F38B68026CAA}.Release|x86.Build.0 = Release|x86 EndGlobalSection EndGlobal
Microsoft Visual Studio Solution File, Format Version 12.00 # Visual Studio 15 Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "DebuggerTestSuite", "DebuggerTestSuite\DebuggerTestSuite.csproj", "{FF4CBDBF-EAC9-4531-A0F7-F3B834182862}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "lazy-debugger-test", "tests\lazy-debugger-test\lazy-debugger-test.csproj", "{BB824B8F-3E3B-48AB-86DB-168D15121372}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "debugger-test", "tests\debugger-test\debugger-test.csproj", "{6C3A70F3-12C1-457D-A8D7-1E0E335B1F66}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "BrowserDebugProxy", "BrowserDebugProxy\BrowserDebugProxy.csproj", "{532E9E35-D877-41A6-81F4-505AEA17AA04}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "BrowserDebugHost", "BrowserDebugHost\BrowserDebugHost.csproj", "{E8A5E324-3FCD-4E3A-AC99-0C39037A8C1B}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "lazy-debugger-test-embedded", "tests\lazy-debugger-test-embedded\lazy-debugger-test-embedded.csproj", "{1E2F50AA-DA21-4D71-AF07-F38B68026CAA}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|x64 = Debug|x64 Debug|x86 = Debug|x86 Release|x64 = Release|x64 Release|x86 = Release|x86 EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution {FF4CBDBF-EAC9-4531-A0F7-F3B834182862}.Debug|x64.ActiveCfg = Debug|x64 {FF4CBDBF-EAC9-4531-A0F7-F3B834182862}.Debug|x64.Build.0 = Debug|x64 {FF4CBDBF-EAC9-4531-A0F7-F3B834182862}.Debug|x86.ActiveCfg = Debug|x86 {FF4CBDBF-EAC9-4531-A0F7-F3B834182862}.Debug|x86.Build.0 = Debug|x86 {FF4CBDBF-EAC9-4531-A0F7-F3B834182862}.Release|x64.ActiveCfg = Release|x64 {FF4CBDBF-EAC9-4531-A0F7-F3B834182862}.Release|x64.Build.0 = Release|x64 {FF4CBDBF-EAC9-4531-A0F7-F3B834182862}.Release|x86.ActiveCfg = Release|x86 {FF4CBDBF-EAC9-4531-A0F7-F3B834182862}.Release|x86.Build.0 = Release|x86 {BB824B8F-3E3B-48AB-86DB-168D15121372}.Debug|x64.ActiveCfg = Debug|x64 {BB824B8F-3E3B-48AB-86DB-168D15121372}.Debug|x64.Build.0 = Debug|x64 {BB824B8F-3E3B-48AB-86DB-168D15121372}.Debug|x86.ActiveCfg = Debug|x86 {BB824B8F-3E3B-48AB-86DB-168D15121372}.Debug|x86.Build.0 = Debug|x86 {BB824B8F-3E3B-48AB-86DB-168D15121372}.Release|x64.ActiveCfg = Release|x64 {BB824B8F-3E3B-48AB-86DB-168D15121372}.Release|x64.Build.0 = Release|x64 {BB824B8F-3E3B-48AB-86DB-168D15121372}.Release|x86.ActiveCfg = Release|x86 {BB824B8F-3E3B-48AB-86DB-168D15121372}.Release|x86.Build.0 = Release|x86 {6C3A70F3-12C1-457D-A8D7-1E0E335B1F66}.Debug|x64.ActiveCfg = Debug|x64 {6C3A70F3-12C1-457D-A8D7-1E0E335B1F66}.Debug|x64.Build.0 = Debug|x64 {6C3A70F3-12C1-457D-A8D7-1E0E335B1F66}.Debug|x86.ActiveCfg = Debug|x86 {6C3A70F3-12C1-457D-A8D7-1E0E335B1F66}.Debug|x86.Build.0 = Debug|x86 {6C3A70F3-12C1-457D-A8D7-1E0E335B1F66}.Release|x64.ActiveCfg = Release|x64 {6C3A70F3-12C1-457D-A8D7-1E0E335B1F66}.Release|x64.Build.0 = Release|x64 {6C3A70F3-12C1-457D-A8D7-1E0E335B1F66}.Release|x86.ActiveCfg = Release|x86 {6C3A70F3-12C1-457D-A8D7-1E0E335B1F66}.Release|x86.Build.0 = Release|x86 {532E9E35-D877-41A6-81F4-505AEA17AA04}.Debug|x64.ActiveCfg = Debug|x64 {532E9E35-D877-41A6-81F4-505AEA17AA04}.Debug|x64.Build.0 = Debug|x64 {532E9E35-D877-41A6-81F4-505AEA17AA04}.Debug|x86.ActiveCfg = Debug|x86 {532E9E35-D877-41A6-81F4-505AEA17AA04}.Debug|x86.Build.0 = Debug|x86 {532E9E35-D877-41A6-81F4-505AEA17AA04}.Release|x64.ActiveCfg = Release|x64 {532E9E35-D877-41A6-81F4-505AEA17AA04}.Release|x64.Build.0 = Release|x64 
{532E9E35-D877-41A6-81F4-505AEA17AA04}.Release|x86.ActiveCfg = Release|x86 {532E9E35-D877-41A6-81F4-505AEA17AA04}.Release|x86.Build.0 = Release|x86 {E8A5E324-3FCD-4E3A-AC99-0C39037A8C1B}.Debug|x64.ActiveCfg = Debug|x64 {E8A5E324-3FCD-4E3A-AC99-0C39037A8C1B}.Debug|x64.Build.0 = Debug|x64 {E8A5E324-3FCD-4E3A-AC99-0C39037A8C1B}.Debug|x86.ActiveCfg = Debug|x86 {E8A5E324-3FCD-4E3A-AC99-0C39037A8C1B}.Debug|x86.Build.0 = Debug|x86 {E8A5E324-3FCD-4E3A-AC99-0C39037A8C1B}.Release|x64.ActiveCfg = Release|x64 {E8A5E324-3FCD-4E3A-AC99-0C39037A8C1B}.Release|x64.Build.0 = Release|x64 {E8A5E324-3FCD-4E3A-AC99-0C39037A8C1B}.Release|x86.ActiveCfg = Release|x86 {E8A5E324-3FCD-4E3A-AC99-0C39037A8C1B}.Release|x86.Build.0 = Release|x86 {1E2F50AA-DA21-4D71-AF07-F38B68026CAA}.Debug|x64.ActiveCfg = Debug|x64 {1E2F50AA-DA21-4D71-AF07-F38B68026CAA}.Debug|x64.Build.0 = Debug|x64 {1E2F50AA-DA21-4D71-AF07-F38B68026CAA}.Debug|x86.ActiveCfg = Debug|x86 {1E2F50AA-DA21-4D71-AF07-F38B68026CAA}.Debug|x86.Build.0 = Debug|x86 {1E2F50AA-DA21-4D71-AF07-F38B68026CAA}.Release|x64.ActiveCfg = Release|x64 {1E2F50AA-DA21-4D71-AF07-F38B68026CAA}.Release|x64.Build.0 = Release|x64 {1E2F50AA-DA21-4D71-AF07-F38B68026CAA}.Release|x86.ActiveCfg = Release|x86 {1E2F50AA-DA21-4D71-AF07-F38B68026CAA}.Release|x86.Build.0 = Release|x86 EndGlobalSection EndGlobal
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/libraries/Microsoft.VisualBasic.Core/tests/Microsoft/VisualBasic/VBFixedStringAttributeTests.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System;
using Xunit;

namespace Microsoft.VisualBasic.Tests
{
    public class VBFixedStringAttributeTests
    {
        [Theory]
        [InlineData(1)]
        [InlineData(32767)]
        public void Ctor_Int(int length)
        {
            var attribute = new VBFixedStringAttribute(length);
            Assert.Equal(length, attribute.Length);
        }

        [Theory]
        [InlineData(-1)]
        [InlineData(0)]
        [InlineData(32768)]
        public void Ctor_InvalidLength_ThrowsArgumentException(int length)
        {
            AssertExtensions.Throws<ArgumentException>(null, () => new VBFixedStringAttribute(length));
        }
    }
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System;
using Xunit;

namespace Microsoft.VisualBasic.Tests
{
    public class VBFixedStringAttributeTests
    {
        [Theory]
        [InlineData(1)]
        [InlineData(32767)]
        public void Ctor_Int(int length)
        {
            var attribute = new VBFixedStringAttribute(length);
            Assert.Equal(length, attribute.Length);
        }

        [Theory]
        [InlineData(-1)]
        [InlineData(0)]
        [InlineData(32768)]
        public void Ctor_InvalidLength_ThrowsArgumentException(int length)
        {
            AssertExtensions.Throws<ArgumentException>(null, () => new VBFixedStringAttribute(length));
        }
    }
}
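For context, a minimal sketch of where VBFixedStringAttribute is applied; the record type and field names are hypothetical. The attribute marks a field with a fixed character width between 1 and 32767, the same bounds the tests above exercise, typically for record-style file I/O in Visual Basic interop scenarios:

using Microsoft.VisualBasic;

public class PersonRecord
{
    // Each field is padded or truncated to the declared width when written as a record.
    [VBFixedString(12)]
    public string Name;

    [VBFixedString(2)]
    public string CountryCode;
}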
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/coreclr/pal/src/libunwind/include/tdep-hppa/jmpbuf.h
/* libunwind - a platform-independent unwind library Copyright (C) 2004 Hewlett-Packard Co Contributed by David Mosberger-Tang <[email protected]> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /* Use glibc's jump-buffer indices; NPTL peeks at SP: */ #ifndef JB_SP # define JB_SP 19 #endif #define JB_RP 20 #define JB_MASK_SAVED 21 #define JB_MASK 22
/* libunwind - a platform-independent unwind library
   Copyright (C) 2004 Hewlett-Packard Co
        Contributed by David Mosberger-Tang <[email protected]>

This file is part of libunwind.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */

/* Use glibc's jump-buffer indices; NPTL peeks at SP: */

#ifndef JB_SP
# define JB_SP 19
#endif
#define JB_RP 20
#define JB_MASK_SAVED 21
#define JB_MASK 22
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest594/Generated594.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL">
  <PropertyGroup>
    <CLRTestPriority>1</CLRTestPriority>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="Generated594.il" />
  </ItemGroup>
  <ItemGroup>
    <ProjectReference Include="..\TestFramework\TestFramework.csproj" />
  </ItemGroup>
</Project>
<Project Sdk="Microsoft.NET.Sdk.IL">
  <PropertyGroup>
    <CLRTestPriority>1</CLRTestPriority>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="Generated594.il" />
  </ItemGroup>
  <ItemGroup>
    <ProjectReference Include="..\TestFramework\TestFramework.csproj" />
  </ItemGroup>
</Project>
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/tests/Common/Platform/platformdefines.h
// Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // #include <stdio.h> #include <memory.h> #include <stdlib.h> #include <string.h> #include <cstdint> #include <minipal/utils.h> #ifndef _PLATFORMDEFINES__H #define _PLATFORMDEFINES__H #ifdef _MSC_VER // Our tests don't care about secure CRT #define _CRT_SECURE_NO_WARNINGS 1 #endif // Ensure that both UNICODE and _UNICODE are set. #ifndef _UNICODE #define _UNICODE #endif #ifndef UNICODE #define UNICODE #endif #include <wchar.h> #define static_assert_no_msg(x) static_assert((x), #x) // // types and constants // #ifdef WINDOWS #define NOMINMAX #include <windows.h> #include <combaseapi.h> #define FS_SEPERATOR L"\\" #define PATH_DELIMITER L";" #define L(t) L##t #define W(str) L##str typedef unsigned error_t; typedef HANDLE THREAD_ID; #define DLL_EXPORT __declspec(dllexport) #else // !WINDOWS #include <pthread.h> typedef char16_t WCHAR; typedef unsigned int DWORD; #ifdef OBJC_TESTS // The Objective-C headers define the BOOL type to be unsigned char or bool. // As a result, we can't redefine it here. So instead, define WINBOOL to be int-sized. typedef int WINBOOL; #else typedef int BOOL; #endif typedef WCHAR *LPWSTR, *PWSTR; typedef const WCHAR *LPCWSTR, *PCWSTR; typedef int HRESULT; #define LONGLONG long long #define ULONGLONG unsigned LONGLONG typedef unsigned int ULONG, *PULONG; #define S_OK 0x0 #define SUCCEEDED(_hr) ((HRESULT)(_hr) >= 0) #define FAILED(_hr) ((HRESULT)(_hr) < 0) #define CCH_BSTRMAX 0x7FFFFFFF // 4 + (0x7ffffffb + 1 ) * 2 ==> 0xFFFFFFFC #define CB_BSTRMAX 0xFFFFFFFa // 4 + (0xfffffff6 + 2) ==> 0xFFFFFFFC #ifdef RC_INVOKED #define _HRESULT_TYPEDEF_(_sc) _sc #else // RC_INVOKED #define _HRESULT_TYPEDEF_(_sc) ((HRESULT)_sc) #endif // RC_INVOKED #define E_INVALIDARG _HRESULT_TYPEDEF_(0x80070057L) #ifdef HOST_64BIT #define __int64 long #else // HOST_64BIT #define __int64 long long #endif // HOST_64BIT #define UInt32x32To64(a, b) ((unsigned __int64)((ULONG)(a)) * (unsigned __int64)((ULONG)(b))) #ifndef TRUE #define TRUE 1 #endif #ifndef FALSE #define FALSE 0 #endif #ifndef WINAPI #define WINAPI __stdcall #endif #ifndef STDMETHODCALLTYPE #define STDMETHODCALLTYPE #endif #ifndef STDMETHODVCALLTYPE #define STDMETHODVCALLTYPE #endif #ifndef _MSC_VER #if __i386__ #define __stdcall __attribute__((stdcall)) #define _cdecl __attribute__((cdecl)) #define __cdecl __attribute__((cdecl)) #define __thiscall __attribute__((thiscall)) #else #define __stdcall #define _cdecl #define __cdecl #define __thiscall #endif #endif #if __GNUC__ >= 4 #define DLL_EXPORT __attribute__ ((visibility ("default"))) #else #define DLL_EXPORT #endif LPWSTR HackyConvertToWSTR(const char* pszInput); #define FS_SEPERATOR L("/") #define PATH_DELIMITER L(":") #define L(t) HackyConvertToWSTR(t) #define W(str) u##str #define MAX_PATH 260 #define __FUNCTIONW__ HackyConvertToWSTR(__func__) typedef pthread_t THREAD_ID; typedef void* (*MacWorker)(void*); typedef DWORD __stdcall (*LPTHREAD_START_ROUTINE)(void*); typedef WCHAR TCHAR; typedef char* LPSTR; typedef const char* LPCSTR; typedef TCHAR* LPTSTR; typedef const TCHAR* LPCTSTR; typedef void* FARPROC; typedef void* HANDLE; typedef HANDLE HMODULE; typedef void* ULONG_PTR; typedef int error_t; typedef void* LPVOID; typedef unsigned char BYTE; typedef WCHAR OLECHAR; typedef double DATE; typedef DWORD LCID; #endif typedef ULONG_PTR DWORD_PTR; // // Method declarations // error_t TP_scpy_s(LPWSTR strDestination, size_t sizeInWords, 
LPCWSTR strSource); error_t TP_scat_s(LPWSTR strDestination, size_t sizeInWords, LPCWSTR strSource); size_t TP_slen(LPCWSTR str); int TP_scmp_s(LPCSTR str1, LPCSTR str2); int TP_wcmp_s(LPCWSTR str1, LPCWSTR str2); error_t TP_getenv_s(size_t* pReturnValue, LPWSTR buffer, size_t sizeInWords, LPCWSTR varname); error_t TP_putenv_s(LPTSTR name, LPTSTR value); void TP_ZeroMemory(LPVOID buffer, size_t sizeInBytes); error_t TP_itow_s(int num, LPWSTR buffer, size_t sizeInCharacters, int radix); error_t TP_itoa_s(int num, LPSTR buffer, size_t sizeInCharacters, int radix); LPWSTR TP_sstr(LPWSTR str, LPWSTR searchStr); LPSTR HackyConvertToSTR(LPWSTR pwszInput); void TP_DebugBreak(); DWORD TP_GetFullPathName(LPWSTR fileName, DWORD nBufferLength, LPWSTR lpBuffer); size_t TP_strncpy_s(char* strDest, size_t numberOfElements, const char *strSource, size_t count); size_t TP_strcpy_s(char *dest, size_t n, char const *src); int TP_wcsncpy_s(LPWSTR strDestination, size_t size1, LPCWSTR strSource, size_t size2); int TP_wcsncmp(LPCWSTR str1, LPCWSTR str2,size_t len); int TP_wmemcmp(LPCWSTR str1, LPCWSTR str2,size_t len); typedef WCHAR* BSTR; BSTR CoreClrBStrAlloc(LPCSTR psz, size_t len); BSTR CoreClrBStrAlloc(LPCWSTR psz, size_t len); inline void *CoreClrBStrAlloc(size_t cb) { // A null is automatically applied in the SysAllocStringByteLen API. // Remove a single OLECHAR for the implied null. // https://docs.microsoft.com/en-us/previous-versions/windows/desktop/api/oleauto/nf-oleauto-sysallocstringbytelen if (cb >= sizeof(OLECHAR)) cb -= sizeof(OLECHAR); return CoreClrBStrAlloc((LPCSTR)nullptr, cb); } void CoreClrBStrFree(BSTR bstr); inline void CoreClrBStrFree(void* p) { CoreClrBStrFree((BSTR)p); } size_t TP_SysStringByteLen(BSTR bstr); BSTR TP_SysAllocString(LPCWSTR psz); size_t TP_SysStringLen(BSTR bstr); inline void *CoreClrAlloc(size_t cb) { #ifdef WINDOWS return ::CoTaskMemAlloc(cb); #else return ::malloc(cb); #endif } inline void CoreClrFree(void *p) { #ifdef WINDOWS return ::CoTaskMemFree(p); #else return ::free(p); #endif } // // Method redirects // #ifdef WINDOWS #define TP_LoadLibrary(l) LoadLibrary(l) #define TP_LoadLibraryW(l) LoadLibraryW(l) #define TP_LoadLibraryA(l) LoadLibraryA(l) #define TP_GetProcAddress(m,e) GetProcAddress(m,e) #define TP_DebugBreak() DebugBreak() #define TP_rand rand #define TP_srand srand #else #define fopen_s(FILEHANDLE, FILENAME, MODE) *(FILEHANDLE) = fopen(FILENAME, MODE) #define _fsopen(FILENAME, MODE, ACCESS) fopen(FILENAME, MODE) #define GetCurrentDirectory(BUFSIZ, BUF) getcwd(BUF, BUFSIZ) #define DeleteFile unlink #define GlobalFree free #define sprintf_s snprintf #define fwscanf_s fwscanf #define strcat_s(DST,SIZ,SRC) strlcat(DST,SRC,SIZ) #define TP_LoadLibrary(l) dlopen(l, 0) #define TP_LoadLibraryW(l) dlopen(l, 0) #define TP_LoadLibraryA(l) dlopen(l, 0) #define TP_GetProcAddress(m,e) dlsym(m,e) #define TP_rand arc4random #define TP_srand srandom #define GetFullPathNameW(fname,buflen,buf,filepart) TP_GetFullPathName(fname,buflen,buf) #define ZeroMemory TP_ZeroMemory #define _itow_s TP_itow_s #define _itoa_s TP_itoa_s #define strcmp TP_scmp_s #define strncpy_s TP_strncpy_s #define strcpy_s TP_strcpy_s #endif #if defined(TARGET_XARCH) && !defined(_MSC_VER) #define ENABLE_AVX __attribute__ ((target("avx"))) #else #define ENABLE_AVX #endif #endif
// Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // #include <stdio.h> #include <memory.h> #include <stdlib.h> #include <string.h> #include <cstdint> #include <minipal/utils.h> #ifndef _PLATFORMDEFINES__H #define _PLATFORMDEFINES__H #ifdef _MSC_VER // Our tests don't care about secure CRT #define _CRT_SECURE_NO_WARNINGS 1 #endif // Ensure that both UNICODE and _UNICODE are set. #ifndef _UNICODE #define _UNICODE #endif #ifndef UNICODE #define UNICODE #endif #include <wchar.h> #define static_assert_no_msg(x) static_assert((x), #x) // // types and constants // #ifdef WINDOWS #define NOMINMAX #include <windows.h> #include <combaseapi.h> #define FS_SEPERATOR L"\\" #define PATH_DELIMITER L";" #define L(t) L##t #define W(str) L##str typedef unsigned error_t; typedef HANDLE THREAD_ID; #define DLL_EXPORT __declspec(dllexport) #else // !WINDOWS #include <pthread.h> typedef char16_t WCHAR; typedef unsigned int DWORD; #ifdef OBJC_TESTS // The Objective-C headers define the BOOL type to be unsigned char or bool. // As a result, we can't redefine it here. So instead, define WINBOOL to be int-sized. typedef int WINBOOL; #else typedef int BOOL; #endif typedef WCHAR *LPWSTR, *PWSTR; typedef const WCHAR *LPCWSTR, *PCWSTR; typedef int HRESULT; #define LONGLONG long long #define ULONGLONG unsigned LONGLONG typedef unsigned int ULONG, *PULONG; #define S_OK 0x0 #define SUCCEEDED(_hr) ((HRESULT)(_hr) >= 0) #define FAILED(_hr) ((HRESULT)(_hr) < 0) #define CCH_BSTRMAX 0x7FFFFFFF // 4 + (0x7ffffffb + 1 ) * 2 ==> 0xFFFFFFFC #define CB_BSTRMAX 0xFFFFFFFa // 4 + (0xfffffff6 + 2) ==> 0xFFFFFFFC #ifdef RC_INVOKED #define _HRESULT_TYPEDEF_(_sc) _sc #else // RC_INVOKED #define _HRESULT_TYPEDEF_(_sc) ((HRESULT)_sc) #endif // RC_INVOKED #define E_INVALIDARG _HRESULT_TYPEDEF_(0x80070057L) #ifdef HOST_64BIT #define __int64 long #else // HOST_64BIT #define __int64 long long #endif // HOST_64BIT #define UInt32x32To64(a, b) ((unsigned __int64)((ULONG)(a)) * (unsigned __int64)((ULONG)(b))) #ifndef TRUE #define TRUE 1 #endif #ifndef FALSE #define FALSE 0 #endif #ifndef WINAPI #define WINAPI __stdcall #endif #ifndef STDMETHODCALLTYPE #define STDMETHODCALLTYPE #endif #ifndef STDMETHODVCALLTYPE #define STDMETHODVCALLTYPE #endif #ifndef _MSC_VER #if __i386__ #define __stdcall __attribute__((stdcall)) #define _cdecl __attribute__((cdecl)) #define __cdecl __attribute__((cdecl)) #define __thiscall __attribute__((thiscall)) #else #define __stdcall #define _cdecl #define __cdecl #define __thiscall #endif #endif #if __GNUC__ >= 4 #define DLL_EXPORT __attribute__ ((visibility ("default"))) #else #define DLL_EXPORT #endif LPWSTR HackyConvertToWSTR(const char* pszInput); #define FS_SEPERATOR L("/") #define PATH_DELIMITER L(":") #define L(t) HackyConvertToWSTR(t) #define W(str) u##str #define MAX_PATH 260 #define __FUNCTIONW__ HackyConvertToWSTR(__func__) typedef pthread_t THREAD_ID; typedef void* (*MacWorker)(void*); typedef DWORD __stdcall (*LPTHREAD_START_ROUTINE)(void*); typedef WCHAR TCHAR; typedef char* LPSTR; typedef const char* LPCSTR; typedef TCHAR* LPTSTR; typedef const TCHAR* LPCTSTR; typedef void* FARPROC; typedef void* HANDLE; typedef HANDLE HMODULE; typedef void* ULONG_PTR; typedef int error_t; typedef void* LPVOID; typedef unsigned char BYTE; typedef WCHAR OLECHAR; typedef double DATE; typedef DWORD LCID; #endif typedef ULONG_PTR DWORD_PTR; // // Method declarations // error_t TP_scpy_s(LPWSTR strDestination, size_t sizeInWords, 
LPCWSTR strSource); error_t TP_scat_s(LPWSTR strDestination, size_t sizeInWords, LPCWSTR strSource); size_t TP_slen(LPCWSTR str); int TP_scmp_s(LPCSTR str1, LPCSTR str2); int TP_wcmp_s(LPCWSTR str1, LPCWSTR str2); error_t TP_getenv_s(size_t* pReturnValue, LPWSTR buffer, size_t sizeInWords, LPCWSTR varname); error_t TP_putenv_s(LPTSTR name, LPTSTR value); void TP_ZeroMemory(LPVOID buffer, size_t sizeInBytes); error_t TP_itow_s(int num, LPWSTR buffer, size_t sizeInCharacters, int radix); error_t TP_itoa_s(int num, LPSTR buffer, size_t sizeInCharacters, int radix); LPWSTR TP_sstr(LPWSTR str, LPWSTR searchStr); LPSTR HackyConvertToSTR(LPWSTR pwszInput); void TP_DebugBreak(); DWORD TP_GetFullPathName(LPWSTR fileName, DWORD nBufferLength, LPWSTR lpBuffer); size_t TP_strncpy_s(char* strDest, size_t numberOfElements, const char *strSource, size_t count); size_t TP_strcpy_s(char *dest, size_t n, char const *src); int TP_wcsncpy_s(LPWSTR strDestination, size_t size1, LPCWSTR strSource, size_t size2); int TP_wcsncmp(LPCWSTR str1, LPCWSTR str2,size_t len); int TP_wmemcmp(LPCWSTR str1, LPCWSTR str2,size_t len); typedef WCHAR* BSTR; BSTR CoreClrBStrAlloc(LPCSTR psz, size_t len); BSTR CoreClrBStrAlloc(LPCWSTR psz, size_t len); inline void *CoreClrBStrAlloc(size_t cb) { // A null is automatically applied in the SysAllocStringByteLen API. // Remove a single OLECHAR for the implied null. // https://docs.microsoft.com/en-us/previous-versions/windows/desktop/api/oleauto/nf-oleauto-sysallocstringbytelen if (cb >= sizeof(OLECHAR)) cb -= sizeof(OLECHAR); return CoreClrBStrAlloc((LPCSTR)nullptr, cb); } void CoreClrBStrFree(BSTR bstr); inline void CoreClrBStrFree(void* p) { CoreClrBStrFree((BSTR)p); } size_t TP_SysStringByteLen(BSTR bstr); BSTR TP_SysAllocString(LPCWSTR psz); size_t TP_SysStringLen(BSTR bstr); inline void *CoreClrAlloc(size_t cb) { #ifdef WINDOWS return ::CoTaskMemAlloc(cb); #else return ::malloc(cb); #endif } inline void CoreClrFree(void *p) { #ifdef WINDOWS return ::CoTaskMemFree(p); #else return ::free(p); #endif } // // Method redirects // #ifdef WINDOWS #define TP_LoadLibrary(l) LoadLibrary(l) #define TP_LoadLibraryW(l) LoadLibraryW(l) #define TP_LoadLibraryA(l) LoadLibraryA(l) #define TP_GetProcAddress(m,e) GetProcAddress(m,e) #define TP_DebugBreak() DebugBreak() #define TP_rand rand #define TP_srand srand #else #define fopen_s(FILEHANDLE, FILENAME, MODE) *(FILEHANDLE) = fopen(FILENAME, MODE) #define _fsopen(FILENAME, MODE, ACCESS) fopen(FILENAME, MODE) #define GetCurrentDirectory(BUFSIZ, BUF) getcwd(BUF, BUFSIZ) #define DeleteFile unlink #define GlobalFree free #define sprintf_s snprintf #define fwscanf_s fwscanf #define strcat_s(DST,SIZ,SRC) strlcat(DST,SRC,SIZ) #define TP_LoadLibrary(l) dlopen(l, 0) #define TP_LoadLibraryW(l) dlopen(l, 0) #define TP_LoadLibraryA(l) dlopen(l, 0) #define TP_GetProcAddress(m,e) dlsym(m,e) #define TP_rand arc4random #define TP_srand srandom #define GetFullPathNameW(fname,buflen,buf,filepart) TP_GetFullPathName(fname,buflen,buf) #define ZeroMemory TP_ZeroMemory #define _itow_s TP_itow_s #define _itoa_s TP_itoa_s #define strcmp TP_scmp_s #define strncpy_s TP_strncpy_s #define strcpy_s TP_strcpy_s #endif #if defined(TARGET_XARCH) && !defined(_MSC_VER) #define ENABLE_AVX __attribute__ ((target("avx"))) #else #define ENABLE_AVX #endif #endif
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/libraries/System.Data.Common/src/System/Data/DbType.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

namespace System.Data
{
    public enum DbType
    {
        AnsiString = 0,
        Binary = 1,
        Byte = 2,
        Boolean = 3,
        Currency = 4,
        Date = 5,
        DateTime = 6,
        Decimal = 7,
        Double = 8,
        Guid = 9,
        Int16 = 10,
        Int32 = 11,
        Int64 = 12,
        Object = 13,
        SByte = 14,
        Single = 15,
        String = 16,
        Time = 17,
        UInt16 = 18,
        UInt32 = 19,
        UInt64 = 20,
        VarNumeric = 21,
        AnsiStringFixedLength = 22,
        StringFixedLength = 23,
        Xml = 25,
        DateTime2 = 26,
        DateTimeOffset = 27,
    }
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

namespace System.Data
{
    public enum DbType
    {
        AnsiString = 0,
        Binary = 1,
        Byte = 2,
        Boolean = 3,
        Currency = 4,
        Date = 5,
        DateTime = 6,
        Decimal = 7,
        Double = 8,
        Guid = 9,
        Int16 = 10,
        Int32 = 11,
        Int64 = 12,
        Object = 13,
        SByte = 14,
        Single = 15,
        String = 16,
        Time = 17,
        UInt16 = 18,
        UInt32 = 19,
        UInt64 = 20,
        VarNumeric = 21,
        AnsiStringFixedLength = 22,
        StringFixedLength = 23,
        Xml = 25,
        DateTime2 = 26,
        DateTimeOffset = 27,
    }
}
-1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
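The loop-cloning change described above can be illustrated with a small conceptual sketch, shown here before the compiler.cpp listing. This is not JIT code: the function name, the guard shape, and the fast/slow split are assumptions used only to show the transformation the description implies. With the initializer now allowed to be an arbitrary expression, the guard for the cloned fast loop includes an explicit check against zero, where previously cloning required a constant or simple local-variable initializer.

#include <stdio.h>

/* Conceptual sketch only: roughly what loop cloning produces for
 * "for (i = expression; i < len; i++) use(a[i]);". The optimizer emits a
 * guarded "fast" copy of the loop in which per-iteration range checks can
 * be dropped, and keeps the original "slow" copy as a fallback. Because
 * the initializer is an arbitrary expression, the guard now includes the
 * "check condition against zero" mentioned in the description. */
static int sum_from(const int *a, int len, int init_expr)
{
    int sum = 0;
    int i = init_expr;            /* init is an expression, not a constant */

    if (i >= 0 && i <= len) {     /* cloning guard, including the >= 0 check */
        for (; i < len; i++)      /* fast loop: accesses known to be in range */
            sum += a[i];
    } else {
        for (; i < len; i++) {    /* slow loop: keep the per-access check */
            if (i >= 0)
                sum += a[i];
        }
    }
    return sum;
}

int main(void)
{
    int v[] = {1, 2, 3, 4, 5};
    printf("%d\n", sum_from(v, 5, 2));   /* prints 12 (3 + 4 + 5) */
    return 0;
}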
./src/coreclr/jit/compiler.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Compiler XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif // _MSC_VER #include "hostallocator.h" #include "emit.h" #include "ssabuilder.h" #include "valuenum.h" #include "rangecheck.h" #include "lower.h" #include "stacklevelsetter.h" #include "jittelemetry.h" #include "patchpointinfo.h" #include "jitstd/algorithm.h" extern ICorJitHost* g_jitHost; #if defined(DEBUG) // Column settings for COMPlus_JitDumpIR. We could(should) make these programmable. #define COLUMN_OPCODE 30 #define COLUMN_OPERANDS (COLUMN_OPCODE + 25) #define COLUMN_KINDS 110 #define COLUMN_FLAGS (COLUMN_KINDS + 32) #endif #if defined(DEBUG) unsigned Compiler::jitTotalMethodCompiled = 0; #endif // defined(DEBUG) #if defined(DEBUG) LONG Compiler::jitNestingLevel = 0; #endif // defined(DEBUG) // static bool Compiler::s_pAltJitExcludeAssembliesListInitialized = false; AssemblyNamesList2* Compiler::s_pAltJitExcludeAssembliesList = nullptr; #ifdef DEBUG // static bool Compiler::s_pJitDisasmIncludeAssembliesListInitialized = false; AssemblyNamesList2* Compiler::s_pJitDisasmIncludeAssembliesList = nullptr; // static bool Compiler::s_pJitFunctionFileInitialized = false; MethodSet* Compiler::s_pJitMethodSet = nullptr; #endif // DEBUG #ifdef CONFIGURABLE_ARM_ABI // static bool GlobalJitOptions::compFeatureHfa = false; LONG GlobalJitOptions::compUseSoftFPConfigured = 0; #endif // CONFIGURABLE_ARM_ABI /***************************************************************************** * * Little helpers to grab the current cycle counter value; this is done * differently based on target architecture, host toolchain, etc. The * main thing is to keep the overhead absolutely minimal; in fact, on * x86/x64 we use RDTSC even though it's not thread-safe; GetThreadCycles * (which is monotonous) is just too expensive. */ #ifdef FEATURE_JIT_METHOD_PERF #if defined(HOST_X86) || defined(HOST_AMD64) #if defined(_MSC_VER) #include <intrin.h> inline bool _our_GetThreadCycles(unsigned __int64* cycleOut) { *cycleOut = __rdtsc(); return true; } #elif defined(__GNUC__) inline bool _our_GetThreadCycles(unsigned __int64* cycleOut) { uint32_t hi, lo; __asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi)); *cycleOut = (static_cast<unsigned __int64>(hi) << 32) | static_cast<unsigned __int64>(lo); return true; } #else // neither _MSC_VER nor __GNUC__ // The following *might* work - might as well try. #define _our_GetThreadCycles(cp) GetThreadCycles(cp) #endif #elif defined(HOST_ARM) || defined(HOST_ARM64) // If this doesn't work please see ../gc/gc.cpp for additional ARM // info (and possible solutions). #define _our_GetThreadCycles(cp) GetThreadCycles(cp) #else // not x86/x64 and not ARM // Don't know what this target is, but let's give it a try; if // someone really wants to make this work, please add the right // code here. 
#define _our_GetThreadCycles(cp) GetThreadCycles(cp) #endif // which host OS const BYTE genTypeSizes[] = { #define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) sz, #include "typelist.h" #undef DEF_TP }; const BYTE genTypeAlignments[] = { #define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) al, #include "typelist.h" #undef DEF_TP }; const BYTE genTypeStSzs[] = { #define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) st, #include "typelist.h" #undef DEF_TP }; const BYTE genActualTypes[] = { #define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) jitType, #include "typelist.h" #undef DEF_TP }; #endif // FEATURE_JIT_METHOD_PERF /*****************************************************************************/ inline unsigned getCurTime() { SYSTEMTIME tim; GetSystemTime(&tim); return (((tim.wHour * 60) + tim.wMinute) * 60 + tim.wSecond) * 1000 + tim.wMilliseconds; } /*****************************************************************************/ #ifdef DEBUG /*****************************************************************************/ static FILE* jitSrcFilePtr; static unsigned jitCurSrcLine; void Compiler::JitLogEE(unsigned level, const char* fmt, ...) { va_list args; if (verbose) { va_start(args, fmt); vflogf(jitstdout, fmt, args); va_end(args); } va_start(args, fmt); vlogf(level, fmt, args); va_end(args); } #endif // DEBUG /*****************************************************************************/ #if defined(DEBUG) || MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || DISPLAY_SIZES || CALL_ARG_STATS static unsigned genMethodCnt; // total number of methods JIT'ted unsigned genMethodICnt; // number of interruptible methods unsigned genMethodNCnt; // number of non-interruptible methods static unsigned genSmallMethodsNeedingExtraMemoryCnt = 0; #endif /*****************************************************************************/ #if MEASURE_NODE_SIZE NodeSizeStats genNodeSizeStats; NodeSizeStats genNodeSizeStatsPerFunc; unsigned genTreeNcntHistBuckets[] = {10, 20, 30, 40, 50, 100, 200, 300, 400, 500, 1000, 5000, 10000, 0}; Histogram genTreeNcntHist(genTreeNcntHistBuckets); unsigned genTreeNsizHistBuckets[] = {1000, 5000, 10000, 50000, 100000, 500000, 1000000, 0}; Histogram genTreeNsizHist(genTreeNsizHistBuckets); #endif // MEASURE_NODE_SIZE /*****************************************************************************/ #if MEASURE_MEM_ALLOC unsigned memAllocHistBuckets[] = {64, 128, 192, 256, 512, 1024, 4096, 8192, 0}; Histogram memAllocHist(memAllocHistBuckets); unsigned memUsedHistBuckets[] = {16, 32, 64, 128, 192, 256, 512, 1024, 4096, 8192, 0}; Histogram memUsedHist(memUsedHistBuckets); #endif // MEASURE_MEM_ALLOC /***************************************************************************** * * Variables to keep track of total code amounts. */ #if DISPLAY_SIZES size_t grossVMsize; // Total IL code size size_t grossNCsize; // Native code + data size size_t totalNCsize; // Native code + data + GC info size (TODO-Cleanup: GC info size only accurate for JIT32_GCENCODER) size_t gcHeaderISize; // GC header size: interruptible methods size_t gcPtrMapISize; // GC pointer map size: interruptible methods size_t gcHeaderNSize; // GC header size: non-interruptible methods size_t gcPtrMapNSize; // GC pointer map size: non-interruptible methods #endif // DISPLAY_SIZES /***************************************************************************** * * Variables to keep track of argument counts. 
*/ #if CALL_ARG_STATS unsigned argTotalCalls; unsigned argHelperCalls; unsigned argStaticCalls; unsigned argNonVirtualCalls; unsigned argVirtualCalls; unsigned argTotalArgs; // total number of args for all calls (including objectPtr) unsigned argTotalDWordArgs; unsigned argTotalLongArgs; unsigned argTotalFloatArgs; unsigned argTotalDoubleArgs; unsigned argTotalRegArgs; unsigned argTotalTemps; unsigned argTotalLclVar; unsigned argTotalDeferred; unsigned argTotalConst; unsigned argTotalObjPtr; unsigned argTotalGTF_ASGinArgs; unsigned argMaxTempsPerMethod; unsigned argCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0}; Histogram argCntTable(argCntBuckets); unsigned argDWordCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0}; Histogram argDWordCntTable(argDWordCntBuckets); unsigned argDWordLngCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0}; Histogram argDWordLngCntTable(argDWordLngCntBuckets); unsigned argTempsCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0}; Histogram argTempsCntTable(argTempsCntBuckets); #endif // CALL_ARG_STATS /***************************************************************************** * * Variables to keep track of basic block counts. */ #if COUNT_BASIC_BLOCKS // -------------------------------------------------- // Basic block count frequency table: // -------------------------------------------------- // <= 1 ===> 26872 count ( 56% of total) // 2 .. 2 ===> 669 count ( 58% of total) // 3 .. 3 ===> 4687 count ( 68% of total) // 4 .. 5 ===> 5101 count ( 78% of total) // 6 .. 10 ===> 5575 count ( 90% of total) // 11 .. 20 ===> 3028 count ( 97% of total) // 21 .. 50 ===> 1108 count ( 99% of total) // 51 .. 100 ===> 182 count ( 99% of total) // 101 .. 1000 ===> 34 count (100% of total) // 1001 .. 10000 ===> 0 count (100% of total) // -------------------------------------------------- unsigned bbCntBuckets[] = {1, 2, 3, 5, 10, 20, 50, 100, 1000, 10000, 0}; Histogram bbCntTable(bbCntBuckets); /* Histogram for the IL opcode size of methods with a single basic block */ unsigned bbSizeBuckets[] = {1, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 0}; Histogram bbOneBBSizeTable(bbSizeBuckets); #endif // COUNT_BASIC_BLOCKS /***************************************************************************** * * Used by optFindNaturalLoops to gather statistical information such as * - total number of natural loops * - number of loops with 1, 2, ... 
exit conditions * - number of loops that have an iterator (for like) * - number of loops that have a constant iterator */ #if COUNT_LOOPS unsigned totalLoopMethods; // counts the total number of methods that have natural loops unsigned maxLoopsPerMethod; // counts the maximum number of loops a method has unsigned totalLoopOverflows; // # of methods that identified more loops than we can represent unsigned totalLoopCount; // counts the total number of natural loops unsigned totalUnnatLoopCount; // counts the total number of (not-necessarily natural) loops unsigned totalUnnatLoopOverflows; // # of methods that identified more unnatural loops than we can represent unsigned iterLoopCount; // counts the # of loops with an iterator (for like) unsigned simpleTestLoopCount; // counts the # of loops with an iterator and a simple loop condition (iter < const) unsigned constIterLoopCount; // counts the # of loops with a constant iterator (for like) bool hasMethodLoops; // flag to keep track if we already counted a method as having loops unsigned loopsThisMethod; // counts the number of loops in the current method bool loopOverflowThisMethod; // True if we exceeded the max # of loops in the method. /* Histogram for number of loops in a method */ unsigned loopCountBuckets[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0}; Histogram loopCountTable(loopCountBuckets); /* Histogram for number of loop exits */ unsigned loopExitCountBuckets[] = {0, 1, 2, 3, 4, 5, 6, 0}; Histogram loopExitCountTable(loopExitCountBuckets); #endif // COUNT_LOOPS //------------------------------------------------------------------------ // getJitGCType: Given the VM's CorInfoGCType convert it to the JIT's var_types // // Arguments: // gcType - an enum value that originally came from an element // of the BYTE[] returned from getClassGClayout() // // Return Value: // The corresponsing enum value from the JIT's var_types // // Notes: // The gcLayout of each field of a struct is returned from getClassGClayout() // as a BYTE[] but each BYTE element is actually a CorInfoGCType value // Note when we 'know' that there is only one element in theis array // the JIT will often pass the address of a single BYTE, instead of a BYTE[] // var_types Compiler::getJitGCType(BYTE gcType) { var_types result = TYP_UNKNOWN; CorInfoGCType corInfoType = (CorInfoGCType)gcType; if (corInfoType == TYPE_GC_NONE) { result = TYP_I_IMPL; } else if (corInfoType == TYPE_GC_REF) { result = TYP_REF; } else if (corInfoType == TYPE_GC_BYREF) { result = TYP_BYREF; } else { noway_assert(!"Bad value of 'gcType'"); } return result; } #ifdef TARGET_X86 //--------------------------------------------------------------------------- // isTrivialPointerSizedStruct: // Check if the given struct type contains only one pointer-sized integer value type // // Arguments: // clsHnd - the handle for the struct type. // // Return Value: // true if the given struct type contains only one pointer-sized integer value type, // false otherwise. 
// bool Compiler::isTrivialPointerSizedStruct(CORINFO_CLASS_HANDLE clsHnd) const { assert(info.compCompHnd->isValueClass(clsHnd)); if (info.compCompHnd->getClassSize(clsHnd) != TARGET_POINTER_SIZE) { return false; } for (;;) { // all of class chain must be of value type and must have only one field if (!info.compCompHnd->isValueClass(clsHnd) || info.compCompHnd->getClassNumInstanceFields(clsHnd) != 1) { return false; } CORINFO_CLASS_HANDLE* pClsHnd = &clsHnd; CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); CorInfoType fieldType = info.compCompHnd->getFieldType(fldHnd, pClsHnd); var_types vt = JITtype2varType(fieldType); if (fieldType == CORINFO_TYPE_VALUECLASS) { clsHnd = *pClsHnd; } else if (varTypeIsI(vt) && !varTypeIsGC(vt)) { return true; } else { return false; } } } #endif // TARGET_X86 //--------------------------------------------------------------------------- // isNativePrimitiveStructType: // Check if the given struct type is an intrinsic type that should be treated as though // it is not a struct at the unmanaged ABI boundary. // // Arguments: // clsHnd - the handle for the struct type. // // Return Value: // true if the given struct type should be treated as a primitive for unmanaged calls, // false otherwise. // bool Compiler::isNativePrimitiveStructType(CORINFO_CLASS_HANDLE clsHnd) { if (!isIntrinsicType(clsHnd)) { return false; } const char* namespaceName = nullptr; const char* typeName = getClassNameFromMetadata(clsHnd, &namespaceName); if (strcmp(namespaceName, "System.Runtime.InteropServices") != 0) { return false; } return strcmp(typeName, "CLong") == 0 || strcmp(typeName, "CULong") == 0 || strcmp(typeName, "NFloat") == 0; } //----------------------------------------------------------------------------- // getPrimitiveTypeForStruct: // Get the "primitive" type that is is used for a struct // of size 'structSize'. // We examine 'clsHnd' to check the GC layout of the struct and // return TYP_REF for structs that simply wrap an object. // If the struct is a one element HFA/HVA, we will return the // proper floating point or vector type. // // Arguments: // structSize - the size of the struct type, cannot be zero // clsHnd - the handle for the struct type, used when may have // an HFA or if we need the GC layout for an object ref. // // Return Value: // The primitive type (i.e. byte, short, int, long, ref, float, double) // used to pass or return structs of this size. // If we shouldn't use a "primitive" type then TYP_UNKNOWN is returned. // Notes: // For 32-bit targets (X86/ARM32) the 64-bit TYP_LONG type is not // considered a primitive type by this method. // So a struct that wraps a 'long' is passed and returned in the // same way as any other 8-byte struct // For ARM32 if we have an HFA struct that wraps a 64-bit double // we will return TYP_DOUBLE. // For vector calling conventions, a vector is considered a "primitive" // type, as it is passed in a single register. // var_types Compiler::getPrimitiveTypeForStruct(unsigned structSize, CORINFO_CLASS_HANDLE clsHnd, bool isVarArg) { assert(structSize != 0); var_types useType = TYP_UNKNOWN; // Start by determining if we have an HFA/HVA with a single element. if (GlobalJitOptions::compFeatureHfa) { // Arm64 Windows VarArg methods arguments will not classify HFA types, they will need to be treated // as if they are not HFA types. 
if (!(TargetArchitecture::IsArm64 && TargetOS::IsWindows && isVarArg)) { switch (structSize) { case 4: case 8: #ifdef TARGET_ARM64 case 16: #endif // TARGET_ARM64 { var_types hfaType = GetHfaType(clsHnd); // We're only interested in the case where the struct size is equal to the size of the hfaType. if (varTypeIsValidHfaType(hfaType)) { if (genTypeSize(hfaType) == structSize) { useType = hfaType; } else { return TYP_UNKNOWN; } } } } if (useType != TYP_UNKNOWN) { return useType; } } } // Now deal with non-HFA/HVA structs. switch (structSize) { case 1: useType = TYP_BYTE; break; case 2: useType = TYP_SHORT; break; #if !defined(TARGET_XARCH) || defined(UNIX_AMD64_ABI) case 3: useType = TYP_INT; break; #endif // !TARGET_XARCH || UNIX_AMD64_ABI #ifdef TARGET_64BIT case 4: // We dealt with the one-float HFA above. All other 4-byte structs are handled as INT. useType = TYP_INT; break; #if !defined(TARGET_XARCH) || defined(UNIX_AMD64_ABI) case 5: case 6: case 7: useType = TYP_I_IMPL; break; #endif // !TARGET_XARCH || UNIX_AMD64_ABI #endif // TARGET_64BIT case TARGET_POINTER_SIZE: { BYTE gcPtr = 0; // Check if this pointer-sized struct is wrapping a GC object info.compCompHnd->getClassGClayout(clsHnd, &gcPtr); useType = getJitGCType(gcPtr); } break; default: useType = TYP_UNKNOWN; break; } return useType; } //----------------------------------------------------------------------------- // getArgTypeForStruct: // Get the type that is used to pass values of the given struct type. // If you have already retrieved the struct size then it should be // passed as the optional fourth argument, as this allows us to avoid // an extra call to getClassSize(clsHnd) // // Arguments: // clsHnd - the handle for the struct type // wbPassStruct - An "out" argument with information about how // the struct is to be passed // isVarArg - is vararg, used to ignore HFA types for Arm64 windows varargs // structSize - the size of the struct type, // or zero if we should call getClassSize(clsHnd) // // Return Value: // For wbPassStruct you can pass a 'nullptr' and nothing will be written // or returned for that out parameter. // When *wbPassStruct is SPK_PrimitiveType this method's return value // is the primitive type used to pass the struct. // When *wbPassStruct is SPK_ByReference this method's return value // is always TYP_UNKNOWN and the struct type is passed by reference to a copy // When *wbPassStruct is SPK_ByValue or SPK_ByValueAsHfa this method's return value // is always TYP_STRUCT and the struct type is passed by value either // using multiple registers or on the stack. // // Assumptions: // The size must be the size of the given type. // The given class handle must be for a value type (struct). // // Notes: // About HFA types: // When the clsHnd is a one element HFA type we return the appropriate // floating point primitive type and *wbPassStruct is SPK_PrimitiveType // If there are two or more elements in the HFA type then the this method's // return value is TYP_STRUCT and *wbPassStruct is SPK_ByValueAsHfa // var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, structPassingKind* wbPassStruct, bool isVarArg, unsigned structSize) { var_types useType = TYP_UNKNOWN; structPassingKind howToPassStruct = SPK_Unknown; // We must change this before we return assert(structSize != 0); // Determine if we can pass the struct as a primitive type. // Note that on x86 we only pass specific pointer-sized structs that satisfy isTrivialPointerSizedStruct checks. 
#ifndef TARGET_X86 #ifdef UNIX_AMD64_ABI // An 8-byte struct may need to be passed in a floating point register // So we always consult the struct "Classifier" routine // SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; eeGetSystemVAmd64PassStructInRegisterDescriptor(clsHnd, &structDesc); if (structDesc.passedInRegisters && (structDesc.eightByteCount != 1)) { // We can't pass this as a primitive type. } else if (structDesc.eightByteClassifications[0] == SystemVClassificationTypeSSE) { // If this is passed as a floating type, use that. // Otherwise, we'll use the general case - we don't want to use the "EightByteType" // directly, because it returns `TYP_INT` for any integral type <= 4 bytes, and // we need to preserve small types. useType = GetEightByteType(structDesc, 0); } else #endif // UNIX_AMD64_ABI // The largest arg passed in a single register is MAX_PASS_SINGLEREG_BYTES, // so we can skip calling getPrimitiveTypeForStruct when we // have a struct that is larger than that. // if (structSize <= MAX_PASS_SINGLEREG_BYTES) { // We set the "primitive" useType based upon the structSize // and also examine the clsHnd to see if it is an HFA of count one useType = getPrimitiveTypeForStruct(structSize, clsHnd, isVarArg); } #else if (isTrivialPointerSizedStruct(clsHnd)) { useType = TYP_I_IMPL; } #endif // !TARGET_X86 // Did we change this struct type into a simple "primitive" type? // if (useType != TYP_UNKNOWN) { // Yes, we should use the "primitive" type in 'useType' howToPassStruct = SPK_PrimitiveType; } else // We can't replace the struct with a "primitive" type { // See if we can pass this struct by value, possibly in multiple registers // or if we should pass it by reference to a copy // if (structSize <= MAX_PASS_MULTIREG_BYTES) { // Structs that are HFA/HVA's are passed by value in multiple registers. // Arm64 Windows VarArg methods arguments will not classify HFA/HVA types, they will need to be treated // as if they are not HFA/HVA types. 
var_types hfaType; if (TargetArchitecture::IsArm64 && TargetOS::IsWindows && isVarArg) { hfaType = TYP_UNDEF; } else { hfaType = GetHfaType(clsHnd); } if (varTypeIsValidHfaType(hfaType)) { // HFA's of count one should have been handled by getPrimitiveTypeForStruct assert(GetHfaCount(clsHnd) >= 2); // setup wbPassType and useType indicate that this is passed by value as an HFA // using multiple registers // (when all of the parameters registers are used, then the stack will be used) howToPassStruct = SPK_ByValueAsHfa; useType = TYP_STRUCT; } else // Not an HFA struct type { #ifdef UNIX_AMD64_ABI // The case of (structDesc.eightByteCount == 1) should have already been handled if ((structDesc.eightByteCount > 1) || !structDesc.passedInRegisters) { // setup wbPassType and useType indicate that this is passed by value in multiple registers // (when all of the parameters registers are used, then the stack will be used) howToPassStruct = SPK_ByValue; useType = TYP_STRUCT; } else { assert(structDesc.eightByteCount == 0); // Otherwise we pass this struct by reference to a copy // setup wbPassType and useType indicate that this is passed using one register // (by reference to a copy) howToPassStruct = SPK_ByReference; useType = TYP_UNKNOWN; } #elif defined(TARGET_ARM64) // Structs that are pointer sized or smaller should have been handled by getPrimitiveTypeForStruct assert(structSize > TARGET_POINTER_SIZE); // On ARM64 structs that are 9-16 bytes are passed by value in multiple registers // if (structSize <= (TARGET_POINTER_SIZE * 2)) { // setup wbPassType and useType indicate that this is passed by value in multiple registers // (when all of the parameters registers are used, then the stack will be used) howToPassStruct = SPK_ByValue; useType = TYP_STRUCT; } else // a structSize that is 17-32 bytes in size { // Otherwise we pass this struct by reference to a copy // setup wbPassType and useType indicate that this is passed using one register // (by reference to a copy) howToPassStruct = SPK_ByReference; useType = TYP_UNKNOWN; } #elif defined(TARGET_X86) || defined(TARGET_ARM) // Otherwise we pass this struct by value on the stack // setup wbPassType and useType indicate that this is passed by value according to the X86/ARM32 ABI howToPassStruct = SPK_ByValue; useType = TYP_STRUCT; #else // TARGET_XXX noway_assert(!"Unhandled TARGET in getArgTypeForStruct (with FEATURE_MULTIREG_ARGS=1)"); #endif // TARGET_XXX } } else // (structSize > MAX_PASS_MULTIREG_BYTES) { // We have a (large) struct that can't be replaced with a "primitive" type // and can't be passed in multiple registers CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_X86) || defined(TARGET_ARM) || defined(UNIX_AMD64_ABI) // Otherwise we pass this struct by value on the stack // setup wbPassType and useType indicate that this is passed by value according to the X86/ARM32 ABI howToPassStruct = SPK_ByValue; useType = TYP_STRUCT; #elif defined(TARGET_AMD64) || defined(TARGET_ARM64) // Otherwise we pass this struct by reference to a copy // setup wbPassType and useType indicate that this is passed using one register (by reference to a copy) howToPassStruct = SPK_ByReference; useType = TYP_UNKNOWN; #else // TARGET_XXX noway_assert(!"Unhandled TARGET in getArgTypeForStruct"); #endif // TARGET_XXX } } // 'howToPassStruct' must be set to one of the valid values before we return assert(howToPassStruct != SPK_Unknown); if (wbPassStruct != nullptr) { *wbPassStruct = howToPassStruct; } return useType; } 
//----------------------------------------------------------------------------- // getReturnTypeForStruct: // Get the type that is used to return values of the given struct type. // If you have already retrieved the struct size then it should be // passed as the optional third argument, as this allows us to avoid // an extra call to getClassSize(clsHnd) // // Arguments: // clsHnd - the handle for the struct type // callConv - the calling convention of the function // that returns this struct. // wbReturnStruct - An "out" argument with information about how // the struct is to be returned // structSize - the size of the struct type, // or zero if we should call getClassSize(clsHnd) // // Return Value: // For wbReturnStruct you can pass a 'nullptr' and nothing will be written // or returned for that out parameter. // When *wbReturnStruct is SPK_PrimitiveType this method's return value // is the primitive type used to return the struct. // When *wbReturnStruct is SPK_ByReference this method's return value // is always TYP_UNKNOWN and the struct type is returned using a return buffer // When *wbReturnStruct is SPK_ByValue or SPK_ByValueAsHfa this method's return value // is always TYP_STRUCT and the struct type is returned using multiple registers. // // Assumptions: // The size must be the size of the given type. // The given class handle must be for a value type (struct). // // Notes: // About HFA types: // When the clsHnd is a one element HFA type then this method's return // value is the appropriate floating point primitive type and // *wbReturnStruct is SPK_PrimitiveType. // If there are two or more elements in the HFA type and the target supports // multireg return types then the return value is TYP_STRUCT and // *wbReturnStruct is SPK_ByValueAsHfa. // Additionally if there are two or more elements in the HFA type and // the target doesn't support multreg return types then it is treated // as if it wasn't an HFA type. // About returning TYP_STRUCT: // Whenever this method's return value is TYP_STRUCT it always means // that multiple registers are used to return this struct. // var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, CorInfoCallConvExtension callConv, structPassingKind* wbReturnStruct /* = nullptr */, unsigned structSize /* = 0 */) { var_types useType = TYP_UNKNOWN; structPassingKind howToReturnStruct = SPK_Unknown; // We must change this before we return bool canReturnInRegister = true; assert(clsHnd != NO_CLASS_HANDLE); if (structSize == 0) { structSize = info.compCompHnd->getClassSize(clsHnd); } assert(structSize > 0); #ifdef UNIX_AMD64_ABI // An 8-byte struct may need to be returned in a floating point register // So we always consult the struct "Classifier" routine // SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; eeGetSystemVAmd64PassStructInRegisterDescriptor(clsHnd, &structDesc); if (structDesc.eightByteCount == 1) { assert(structSize <= sizeof(double)); assert(structDesc.passedInRegisters); if (structDesc.eightByteClassifications[0] == SystemVClassificationTypeSSE) { // If this is returned as a floating type, use that. // Otherwise, leave as TYP_UNKONWN and we'll sort things out below. useType = GetEightByteType(structDesc, 0); howToReturnStruct = SPK_PrimitiveType; } } else { // Return classification is not always size based... 
canReturnInRegister = structDesc.passedInRegisters; if (!canReturnInRegister) { assert(structDesc.eightByteCount == 0); howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } } #elif UNIX_X86_ABI if (callConv != CorInfoCallConvExtension::Managed && !isNativePrimitiveStructType(clsHnd)) { canReturnInRegister = false; howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } #endif if (TargetOS::IsWindows && !TargetArchitecture::IsArm32 && callConvIsInstanceMethodCallConv(callConv) && !isNativePrimitiveStructType(clsHnd)) { canReturnInRegister = false; howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } // Check for cases where a small struct is returned in a register // via a primitive type. // // The largest "primitive type" is MAX_PASS_SINGLEREG_BYTES // so we can skip calling getPrimitiveTypeForStruct when we // have a struct that is larger than that. if (canReturnInRegister && (useType == TYP_UNKNOWN) && (structSize <= MAX_PASS_SINGLEREG_BYTES)) { // We set the "primitive" useType based upon the structSize // and also examine the clsHnd to see if it is an HFA of count one // // The ABI for struct returns in varArg methods, is same as the normal case, // so pass false for isVararg useType = getPrimitiveTypeForStruct(structSize, clsHnd, /*isVararg=*/false); if (useType != TYP_UNKNOWN) { if (structSize == genTypeSize(useType)) { // Currently: 1, 2, 4, or 8 byte structs howToReturnStruct = SPK_PrimitiveType; } else { // Currently: 3, 5, 6, or 7 byte structs assert(structSize < genTypeSize(useType)); howToReturnStruct = SPK_EnclosingType; } } } #ifdef TARGET_64BIT // Note this handles an odd case when FEATURE_MULTIREG_RET is disabled and HFAs are enabled // // getPrimitiveTypeForStruct will return TYP_UNKNOWN for a struct that is an HFA of two floats // because when HFA are enabled, normally we would use two FP registers to pass or return it // // But if we don't have support for multiple register return types, we have to change this. // Since what we have is an 8-byte struct (float + float) we change useType to TYP_I_IMPL // so that the struct is returned instead using an 8-byte integer register. // if ((FEATURE_MULTIREG_RET == 0) && (useType == TYP_UNKNOWN) && (structSize == (2 * sizeof(float))) && IsHfa(clsHnd)) { useType = TYP_I_IMPL; howToReturnStruct = SPK_PrimitiveType; } #endif // Did we change this struct type into a simple "primitive" type? if (useType != TYP_UNKNOWN) { // If so, we should have already set howToReturnStruct, too. 
assert(howToReturnStruct != SPK_Unknown); } else if (canReturnInRegister) // We can't replace the struct with a "primitive" type { // See if we can return this struct by value, possibly in multiple registers // or if we should return it using a return buffer register // if ((FEATURE_MULTIREG_RET == 1) && (structSize <= MAX_RET_MULTIREG_BYTES)) { // Structs that are HFA's are returned in multiple registers if (IsHfa(clsHnd)) { // HFA's of count one should have been handled by getPrimitiveTypeForStruct assert(GetHfaCount(clsHnd) >= 2); // setup wbPassType and useType indicate that this is returned by value as an HFA // using multiple registers howToReturnStruct = SPK_ByValueAsHfa; useType = TYP_STRUCT; } else // Not an HFA struct type { #ifdef UNIX_AMD64_ABI // The cases of (structDesc.eightByteCount == 1) and (structDesc.eightByteCount == 0) // should have already been handled assert(structDesc.eightByteCount > 1); // setup wbPassType and useType indicate that this is returned by value in multiple registers howToReturnStruct = SPK_ByValue; useType = TYP_STRUCT; assert(structDesc.passedInRegisters == true); #elif defined(TARGET_ARM64) // Structs that are pointer sized or smaller should have been handled by getPrimitiveTypeForStruct assert(structSize > TARGET_POINTER_SIZE); // On ARM64 structs that are 9-16 bytes are returned by value in multiple registers // if (structSize <= (TARGET_POINTER_SIZE * 2)) { // setup wbPassType and useType indicate that this is return by value in multiple registers howToReturnStruct = SPK_ByValue; useType = TYP_STRUCT; } else // a structSize that is 17-32 bytes in size { // Otherwise we return this struct using a return buffer // setup wbPassType and useType indicate that this is returned using a return buffer register // (reference to a return buffer) howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } #elif defined(TARGET_X86) // Only 8-byte structs are return in multiple registers. // We also only support multireg struct returns on x86 to match the native calling convention. // So return 8-byte structs only when the calling convention is a native calling convention. 
if (structSize == MAX_RET_MULTIREG_BYTES && callConv != CorInfoCallConvExtension::Managed) { // setup wbPassType and useType indicate that this is return by value in multiple registers howToReturnStruct = SPK_ByValue; useType = TYP_STRUCT; } else { // Otherwise we return this struct using a return buffer // setup wbPassType and useType indicate that this is returned using a return buffer register // (reference to a return buffer) howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } #elif defined(TARGET_ARM) // Otherwise we return this struct using a return buffer // setup wbPassType and useType indicate that this is returned using a return buffer register // (reference to a return buffer) howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; #else // TARGET_XXX noway_assert(!"Unhandled TARGET in getReturnTypeForStruct (with FEATURE_MULTIREG_ARGS=1)"); #endif // TARGET_XXX } } else // (structSize > MAX_RET_MULTIREG_BYTES) || (FEATURE_MULTIREG_RET == 0) { // We have a (large) struct that can't be replaced with a "primitive" type // and can't be returned in multiple registers // We return this struct using a return buffer register // setup wbPassType and useType indicate that this is returned using a return buffer register // (reference to a return buffer) howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } } // 'howToReturnStruct' must be set to one of the valid values before we return assert(howToReturnStruct != SPK_Unknown); if (wbReturnStruct != nullptr) { *wbReturnStruct = howToReturnStruct; } return useType; } /////////////////////////////////////////////////////////////////////////////// // // MEASURE_NOWAY: code to measure and rank dynamic occurrences of noway_assert. // (Just the appearances of noway_assert, whether the assert is true or false.) // This might help characterize the cost of noway_assert in non-DEBUG builds, // or determine which noway_assert should be simple DEBUG-only asserts. // /////////////////////////////////////////////////////////////////////////////// #if MEASURE_NOWAY struct FileLine { char* m_file; unsigned m_line; char* m_condStr; FileLine() : m_file(nullptr), m_line(0), m_condStr(nullptr) { } FileLine(const char* file, unsigned line, const char* condStr) : m_line(line) { size_t newSize = (strlen(file) + 1) * sizeof(char); m_file = HostAllocator::getHostAllocator().allocate<char>(newSize); strcpy_s(m_file, newSize, file); newSize = (strlen(condStr) + 1) * sizeof(char); m_condStr = HostAllocator::getHostAllocator().allocate<char>(newSize); strcpy_s(m_condStr, newSize, condStr); } FileLine(const FileLine& other) { m_file = other.m_file; m_line = other.m_line; m_condStr = other.m_condStr; } // GetHashCode() and Equals() are needed by JitHashTable static unsigned GetHashCode(FileLine fl) { assert(fl.m_file != nullptr); unsigned code = fl.m_line; for (const char* p = fl.m_file; *p != '\0'; p++) { code += *p; } // Could also add condStr. 
return code; } static bool Equals(FileLine fl1, FileLine fl2) { return (fl1.m_line == fl2.m_line) && (0 == strcmp(fl1.m_file, fl2.m_file)); } }; typedef JitHashTable<FileLine, FileLine, size_t, HostAllocator> FileLineToCountMap; FileLineToCountMap* NowayAssertMap; void Compiler::RecordNowayAssert(const char* filename, unsigned line, const char* condStr) { if (NowayAssertMap == nullptr) { NowayAssertMap = new (HostAllocator::getHostAllocator()) FileLineToCountMap(HostAllocator::getHostAllocator()); } FileLine fl(filename, line, condStr); size_t* pCount = NowayAssertMap->LookupPointer(fl); if (pCount == nullptr) { NowayAssertMap->Set(fl, 1); } else { ++(*pCount); } } void RecordNowayAssertGlobal(const char* filename, unsigned line, const char* condStr) { if ((JitConfig.JitMeasureNowayAssert() == 1) && (JitTls::GetCompiler() != nullptr)) { JitTls::GetCompiler()->RecordNowayAssert(filename, line, condStr); } } struct NowayAssertCountMap { size_t count; FileLine fl; NowayAssertCountMap() : count(0) { } struct compare { bool operator()(const NowayAssertCountMap& elem1, const NowayAssertCountMap& elem2) { return (ssize_t)elem2.count < (ssize_t)elem1.count; // sort in descending order } }; }; void DisplayNowayAssertMap() { if (NowayAssertMap != nullptr) { FILE* fout; LPCWSTR strJitMeasureNowayAssertFile = JitConfig.JitMeasureNowayAssertFile(); if (strJitMeasureNowayAssertFile != nullptr) { fout = _wfopen(strJitMeasureNowayAssertFile, W("a")); if (fout == nullptr) { fprintf(jitstdout, "Failed to open JitMeasureNowayAssertFile \"%ws\"\n", strJitMeasureNowayAssertFile); return; } } else { fout = jitstdout; } // Iterate noway assert map, create sorted table by occurrence, dump it. unsigned count = NowayAssertMap->GetCount(); NowayAssertCountMap* nacp = new NowayAssertCountMap[count]; unsigned i = 0; for (FileLineToCountMap::KeyIterator iter = NowayAssertMap->Begin(), end = NowayAssertMap->End(); !iter.Equal(end); ++iter) { nacp[i].count = iter.GetValue(); nacp[i].fl = iter.Get(); ++i; } jitstd::sort(nacp, nacp + count, NowayAssertCountMap::compare()); if (fout == jitstdout) { // Don't output the header if writing to a file, since we'll be appending to existing dumps in that case. fprintf(fout, "\nnoway_assert counts:\n"); fprintf(fout, "count, file, line, text\n"); } for (i = 0; i < count; i++) { fprintf(fout, "%u, %s, %u, \"%s\"\n", nacp[i].count, nacp[i].fl.m_file, nacp[i].fl.m_line, nacp[i].fl.m_condStr); } if (fout != jitstdout) { fclose(fout); fout = nullptr; } } } #endif // MEASURE_NOWAY /***************************************************************************** * variables to keep track of how many iterations we go in a dataflow pass */ #if DATAFLOW_ITER unsigned CSEiterCount; // counts the # of iteration for the CSE dataflow unsigned CFiterCount; // counts the # of iteration for the Const Folding dataflow #endif // DATAFLOW_ITER #if MEASURE_BLOCK_SIZE size_t genFlowNodeSize; size_t genFlowNodeCnt; #endif // MEASURE_BLOCK_SIZE /*****************************************************************************/ // We keep track of methods we've already compiled. 
/***************************************************************************** * Declare the statics */ #ifdef DEBUG /* static */ LONG Compiler::s_compMethodsCount = 0; // to produce unique label names #endif #if MEASURE_MEM_ALLOC /* static */ bool Compiler::s_dspMemStats = false; #endif #ifndef PROFILING_SUPPORTED const bool Compiler::Options::compNoPInvokeInlineCB = false; #endif /***************************************************************************** * * One time initialization code */ /* static */ void Compiler::compStartup() { #if DISPLAY_SIZES grossVMsize = grossNCsize = totalNCsize = 0; #endif // DISPLAY_SIZES /* Initialize the table of tree node sizes */ GenTree::InitNodeSize(); #ifdef JIT32_GCENCODER // Initialize the GC encoder lookup table GCInfo::gcInitEncoderLookupTable(); #endif /* Initialize the emitter */ emitter::emitInit(); // Static vars of ValueNumStore ValueNumStore::InitValueNumStoreStatics(); compDisplayStaticSizes(jitstdout); } /***************************************************************************** * * One time finalization code */ /* static */ void Compiler::compShutdown() { if (s_pAltJitExcludeAssembliesList != nullptr) { s_pAltJitExcludeAssembliesList->~AssemblyNamesList2(); // call the destructor s_pAltJitExcludeAssembliesList = nullptr; } #ifdef DEBUG if (s_pJitDisasmIncludeAssembliesList != nullptr) { s_pJitDisasmIncludeAssembliesList->~AssemblyNamesList2(); // call the destructor s_pJitDisasmIncludeAssembliesList = nullptr; } #endif // DEBUG #if MEASURE_NOWAY DisplayNowayAssertMap(); #endif // MEASURE_NOWAY /* Shut down the emitter */ emitter::emitDone(); #if defined(DEBUG) || defined(INLINE_DATA) // Finish reading and/or writing inline xml if (JitConfig.JitInlineDumpXmlFile() != nullptr) { FILE* file = _wfopen(JitConfig.JitInlineDumpXmlFile(), W("a")); if (file != nullptr) { InlineStrategy::FinalizeXml(file); fclose(file); } else { InlineStrategy::FinalizeXml(); } } #endif // defined(DEBUG) || defined(INLINE_DATA) #if defined(DEBUG) || MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || DISPLAY_SIZES || CALL_ARG_STATS if (genMethodCnt == 0) { return; } #endif #if NODEBASH_STATS GenTree::ReportOperBashing(jitstdout); #endif // Where should we write our statistics output? FILE* fout = jitstdout; #ifdef FEATURE_JIT_METHOD_PERF if (compJitTimeLogFilename != nullptr) { FILE* jitTimeLogFile = _wfopen(compJitTimeLogFilename, W("a")); if (jitTimeLogFile != nullptr) { CompTimeSummaryInfo::s_compTimeSummary.Print(jitTimeLogFile); fclose(jitTimeLogFile); } } JitTimer::Shutdown(); #endif // FEATURE_JIT_METHOD_PERF #if COUNT_AST_OPERS // Add up all the counts so that we can show percentages of total unsigned totalCount = 0; for (unsigned op = 0; op < GT_COUNT; op++) { totalCount += GenTree::s_gtNodeCounts[op]; } if (totalCount > 0) { struct OperInfo { unsigned Count; unsigned Size; genTreeOps Oper; }; OperInfo opers[GT_COUNT]; for (unsigned op = 0; op < GT_COUNT; op++) { opers[op] = {GenTree::s_gtNodeCounts[op], GenTree::s_gtTrueSizes[op], static_cast<genTreeOps>(op)}; } jitstd::sort(opers, opers + ArrLen(opers), [](const OperInfo& l, const OperInfo& r) { // We'll be sorting in descending order. 
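            // Compare the raw node counts; the table printed below is explicitly approximate,
            // so no tie-breaking between operators with equal counts is needed.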
return l.Count >= r.Count; }); unsigned remainingCount = totalCount; unsigned remainingCountLarge = 0; unsigned remainingCountSmall = 0; unsigned countLarge = 0; unsigned countSmall = 0; fprintf(fout, "\nGenTree operator counts (approximate):\n\n"); for (OperInfo oper : opers) { unsigned size = oper.Size; unsigned count = oper.Count; double percentage = 100.0 * count / totalCount; if (size > TREE_NODE_SZ_SMALL) { countLarge += count; } else { countSmall += count; } // Let's not show anything below a threshold if (percentage >= 0.5) { fprintf(fout, " GT_%-17s %7u (%4.1lf%%) %3u bytes each\n", GenTree::OpName(oper.Oper), count, percentage, size); remainingCount -= count; } else { if (size > TREE_NODE_SZ_SMALL) { remainingCountLarge += count; } else { remainingCountSmall += count; } } } if (remainingCount > 0) { fprintf(fout, " All other GT_xxx ... %7u (%4.1lf%%) ... %4.1lf%% small + %4.1lf%% large\n", remainingCount, 100.0 * remainingCount / totalCount, 100.0 * remainingCountSmall / totalCount, 100.0 * remainingCountLarge / totalCount); } fprintf(fout, " -----------------------------------------------------\n"); fprintf(fout, " Total ....... %11u --ALL-- ... %4.1lf%% small + %4.1lf%% large\n", totalCount, 100.0 * countSmall / totalCount, 100.0 * countLarge / totalCount); fprintf(fout, "\n"); } #endif // COUNT_AST_OPERS #if DISPLAY_SIZES if (grossVMsize && grossNCsize) { fprintf(fout, "\n"); fprintf(fout, "--------------------------------------\n"); fprintf(fout, "Function and GC info size stats\n"); fprintf(fout, "--------------------------------------\n"); fprintf(fout, "[%7u VM, %8u %6s %4u%%] %s\n", grossVMsize, grossNCsize, Target::g_tgtCPUName, 100 * grossNCsize / grossVMsize, "Total (excluding GC info)"); fprintf(fout, "[%7u VM, %8u %6s %4u%%] %s\n", grossVMsize, totalNCsize, Target::g_tgtCPUName, 100 * totalNCsize / grossVMsize, "Total (including GC info)"); if (gcHeaderISize || gcHeaderNSize) { fprintf(fout, "\n"); fprintf(fout, "GC tables : [%7uI,%7uN] %7u byt (%u%% of IL, %u%% of %s).\n", gcHeaderISize + gcPtrMapISize, gcHeaderNSize + gcPtrMapNSize, totalNCsize - grossNCsize, 100 * (totalNCsize - grossNCsize) / grossVMsize, 100 * (totalNCsize - grossNCsize) / grossNCsize, Target::g_tgtCPUName); fprintf(fout, "GC headers : [%7uI,%7uN] %7u byt, [%4.1fI,%4.1fN] %4.1f byt/meth\n", gcHeaderISize, gcHeaderNSize, gcHeaderISize + gcHeaderNSize, (float)gcHeaderISize / (genMethodICnt + 0.001), (float)gcHeaderNSize / (genMethodNCnt + 0.001), (float)(gcHeaderISize + gcHeaderNSize) / genMethodCnt); fprintf(fout, "GC ptr maps : [%7uI,%7uN] %7u byt, [%4.1fI,%4.1fN] %4.1f byt/meth\n", gcPtrMapISize, gcPtrMapNSize, gcPtrMapISize + gcPtrMapNSize, (float)gcPtrMapISize / (genMethodICnt + 0.001), (float)gcPtrMapNSize / (genMethodNCnt + 0.001), (float)(gcPtrMapISize + gcPtrMapNSize) / genMethodCnt); } else { fprintf(fout, "\n"); fprintf(fout, "GC tables take up %u bytes (%u%% of instr, %u%% of %6s code).\n", totalNCsize - grossNCsize, 100 * (totalNCsize - grossNCsize) / grossVMsize, 100 * (totalNCsize - grossNCsize) / grossNCsize, Target::g_tgtCPUName); } #ifdef DEBUG #if DOUBLE_ALIGN fprintf(fout, "%u out of %u methods generated with double-aligned stack\n", Compiler::s_lvaDoubleAlignedProcsCount, genMethodCnt); #endif #endif } #endif // DISPLAY_SIZES #if CALL_ARG_STATS compDispCallArgStats(fout); #endif #if COUNT_BASIC_BLOCKS fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Basic block count frequency table:\n"); fprintf(fout, 
"--------------------------------------------------\n"); bbCntTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "\n"); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "IL method size frequency table for methods with a single basic block:\n"); fprintf(fout, "--------------------------------------------------\n"); bbOneBBSizeTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); #endif // COUNT_BASIC_BLOCKS #if COUNT_LOOPS fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Loop stats\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Total number of methods with loops is %5u\n", totalLoopMethods); fprintf(fout, "Total number of loops is %5u\n", totalLoopCount); fprintf(fout, "Maximum number of loops per method is %5u\n", maxLoopsPerMethod); fprintf(fout, "# of methods overflowing nat loop table is %5u\n", totalLoopOverflows); fprintf(fout, "Total number of 'unnatural' loops is %5u\n", totalUnnatLoopCount); fprintf(fout, "# of methods overflowing unnat loop limit is %5u\n", totalUnnatLoopOverflows); fprintf(fout, "Total number of loops with an iterator is %5u\n", iterLoopCount); fprintf(fout, "Total number of loops with a simple iterator is %5u\n", simpleTestLoopCount); fprintf(fout, "Total number of loops with a constant iterator is %5u\n", constIterLoopCount); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Loop count frequency table:\n"); fprintf(fout, "--------------------------------------------------\n"); loopCountTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Loop exit count frequency table:\n"); fprintf(fout, "--------------------------------------------------\n"); loopExitCountTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); #endif // COUNT_LOOPS #if DATAFLOW_ITER fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Total number of iterations in the CSE dataflow loop is %5u\n", CSEiterCount); fprintf(fout, "Total number of iterations in the CF dataflow loop is %5u\n", CFiterCount); #endif // DATAFLOW_ITER #if MEASURE_NODE_SIZE fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "GenTree node allocation stats\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Allocated %6I64u tree nodes (%7I64u bytes total, avg %4I64u bytes per method)\n", genNodeSizeStats.genTreeNodeCnt, genNodeSizeStats.genTreeNodeSize, genNodeSizeStats.genTreeNodeSize / genMethodCnt); fprintf(fout, "Allocated %7I64u bytes of unused tree node space (%3.2f%%)\n", genNodeSizeStats.genTreeNodeSize - genNodeSizeStats.genTreeNodeActualSize, (float)(100 * (genNodeSizeStats.genTreeNodeSize - genNodeSizeStats.genTreeNodeActualSize)) / genNodeSizeStats.genTreeNodeSize); fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Distribution of per-method GenTree node counts:\n"); genTreeNcntHist.dump(fout); fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Distribution of per-method GenTree node allocations (in bytes):\n"); genTreeNsizHist.dump(fout); #endif // MEASURE_NODE_SIZE #if MEASURE_BLOCK_SIZE fprintf(fout, "\n"); fprintf(fout, 
"---------------------------------------------------\n"); fprintf(fout, "BasicBlock and flowList/BasicBlockList allocation stats\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Allocated %6u basic blocks (%7u bytes total, avg %4u bytes per method)\n", BasicBlock::s_Count, BasicBlock::s_Size, BasicBlock::s_Size / genMethodCnt); fprintf(fout, "Allocated %6u flow nodes (%7u bytes total, avg %4u bytes per method)\n", genFlowNodeCnt, genFlowNodeSize, genFlowNodeSize / genMethodCnt); #endif // MEASURE_BLOCK_SIZE #if MEASURE_MEM_ALLOC if (s_dspMemStats) { fprintf(fout, "\nAll allocations:\n"); ArenaAllocator::dumpAggregateMemStats(jitstdout); fprintf(fout, "\nLargest method:\n"); ArenaAllocator::dumpMaxMemStats(jitstdout); fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Distribution of total memory allocated per method (in KB):\n"); memAllocHist.dump(fout); fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Distribution of total memory used per method (in KB):\n"); memUsedHist.dump(fout); } #endif // MEASURE_MEM_ALLOC #if LOOP_HOIST_STATS #ifdef DEBUG // Always display loop stats in retail if (JitConfig.DisplayLoopHoistStats() != 0) #endif // DEBUG { PrintAggregateLoopHoistStats(jitstdout); } #endif // LOOP_HOIST_STATS #if TRACK_ENREG_STATS if (JitConfig.JitEnregStats() != 0) { s_enregisterStats.Dump(fout); } #endif // TRACK_ENREG_STATS #if MEASURE_PTRTAB_SIZE fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "GC pointer table stats\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Reg pointer descriptor size (internal): %8u (avg %4u per method)\n", GCInfo::s_gcRegPtrDscSize, GCInfo::s_gcRegPtrDscSize / genMethodCnt); fprintf(fout, "Total pointer table size: %8u (avg %4u per method)\n", GCInfo::s_gcTotalPtrTabSize, GCInfo::s_gcTotalPtrTabSize / genMethodCnt); #endif // MEASURE_PTRTAB_SIZE #if MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || MEASURE_PTRTAB_SIZE || DISPLAY_SIZES if (genMethodCnt != 0) { fprintf(fout, "\n"); fprintf(fout, "A total of %6u methods compiled", genMethodCnt); #if DISPLAY_SIZES if (genMethodICnt || genMethodNCnt) { fprintf(fout, " (%u interruptible, %u non-interruptible)", genMethodICnt, genMethodNCnt); } #endif // DISPLAY_SIZES fprintf(fout, ".\n"); } #endif // MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || MEASURE_PTRTAB_SIZE || DISPLAY_SIZES #if EMITTER_STATS emitterStats(fout); #endif #if MEASURE_FATAL fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Fatal errors stats\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, " badCode: %u\n", fatal_badCode); fprintf(fout, " noWay: %u\n", fatal_noWay); fprintf(fout, " implLimitation: %u\n", fatal_implLimitation); fprintf(fout, " NOMEM: %u\n", fatal_NOMEM); fprintf(fout, " noWayAssertBody: %u\n", fatal_noWayAssertBody); #ifdef DEBUG fprintf(fout, " noWayAssertBodyArgs: %u\n", fatal_noWayAssertBodyArgs); #endif // DEBUG fprintf(fout, " NYI: %u\n", fatal_NYI); #endif // MEASURE_FATAL } /***************************************************************************** * Display static data structure sizes. 
*/ /* static */ void Compiler::compDisplayStaticSizes(FILE* fout) { #if MEASURE_NODE_SIZE GenTree::DumpNodeSizes(fout); #endif #if EMITTER_STATS emitterStaticStats(fout); #endif } /***************************************************************************** * * Constructor */ void Compiler::compInit(ArenaAllocator* pAlloc, CORINFO_METHOD_HANDLE methodHnd, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, InlineInfo* inlineInfo) { assert(pAlloc); compArenaAllocator = pAlloc; // Inlinee Compile object will only be allocated when needed for the 1st time. InlineeCompiler = nullptr; // Set the inline info. impInlineInfo = inlineInfo; info.compCompHnd = compHnd; info.compMethodHnd = methodHnd; info.compMethodInfo = methodInfo; #ifdef DEBUG bRangeAllowStress = false; #endif #if defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS // Initialize the method name and related info, as it is used early in determining whether to // apply stress modes, and which ones to apply. // Note that even allocating memory can invoke the stress mechanism, so ensure that both // 'compMethodName' and 'compFullName' are either null or valid before we allocate. // (The stress mode checks references these prior to checking bRangeAllowStress.) // info.compMethodName = nullptr; info.compClassName = nullptr; info.compFullName = nullptr; const char* classNamePtr; const char* methodName; methodName = eeGetMethodName(methodHnd, &classNamePtr); unsigned len = (unsigned)roundUp(strlen(classNamePtr) + 1); info.compClassName = getAllocator(CMK_DebugOnly).allocate<char>(len); info.compMethodName = methodName; strcpy_s((char*)info.compClassName, len, classNamePtr); info.compFullName = eeGetMethodFullName(methodHnd); info.compPerfScore = 0.0; info.compMethodSuperPMIIndex = g_jitHost->getIntConfigValue(W("SuperPMIMethodContextNumber"), -1); #endif // defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS #if defined(DEBUG) || defined(INLINE_DATA) info.compMethodHashPrivate = 0; #endif // defined(DEBUG) || defined(INLINE_DATA) #ifdef DEBUG // Opt-in to jit stress based on method hash ranges. // // Note the default (with JitStressRange not set) is that all // methods will be subject to stress. static ConfigMethodRange fJitStressRange; fJitStressRange.EnsureInit(JitConfig.JitStressRange()); assert(!fJitStressRange.Error()); bRangeAllowStress = fJitStressRange.Contains(info.compMethodHash()); #endif // DEBUG eeInfoInitialized = false; compDoAggressiveInlining = false; if (compIsForInlining()) { m_inlineStrategy = nullptr; compInlineResult = inlineInfo->inlineResult; } else { m_inlineStrategy = new (this, CMK_Inlining) InlineStrategy(this); compInlineResult = nullptr; } // Initialize this to the first phase to run. mostRecentlyActivePhase = PHASE_PRE_IMPORT; // Initially, no phase checks are active. activePhaseChecks = PhaseChecks::CHECK_NONE; #ifdef FEATURE_TRACELOGGING // Make sure JIT telemetry is initialized as soon as allocations can be made // but no later than a point where noway_asserts can be thrown. // 1. JIT telemetry could allocate some objects internally. // 2. NowayAsserts are tracked through telemetry. // Note: JIT telemetry could gather data when compiler is not fully initialized. // So you have to initialize the compiler variables you use for telemetry. 
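    // Per the note above, pre-initialize the fields that telemetry can observe before
    // compJitTelemetry.Initialize() runs.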
assert((unsigned)PHASE_PRE_IMPORT == 0); info.compILCodeSize = 0; info.compMethodHnd = nullptr; compJitTelemetry.Initialize(this); #endif fgInit(); lvaInit(); if (!compIsForInlining()) { codeGen = getCodeGenerator(this); optInit(); hashBv::Init(this); compVarScopeMap = nullptr; // If this method were a real constructor for Compiler, these would // become method initializations. impPendingBlockMembers = JitExpandArray<BYTE>(getAllocator()); impSpillCliquePredMembers = JitExpandArray<BYTE>(getAllocator()); impSpillCliqueSuccMembers = JitExpandArray<BYTE>(getAllocator()); new (&genIPmappings, jitstd::placement_t()) jitstd::list<IPmappingDsc>(getAllocator(CMK_DebugInfo)); #ifdef DEBUG new (&genPreciseIPmappings, jitstd::placement_t()) jitstd::list<PreciseIPMapping>(getAllocator(CMK_DebugOnly)); #endif lvMemoryPerSsaData = SsaDefArray<SsaMemDef>(); // // Initialize all the per-method statistics gathering data structures. // optLoopsCloned = 0; #if LOOP_HOIST_STATS m_loopsConsidered = 0; m_curLoopHasHoistedExpression = false; m_loopsWithHoistedExpressions = 0; m_totalHoistedExpressions = 0; #endif // LOOP_HOIST_STATS #if MEASURE_NODE_SIZE genNodeSizeStatsPerFunc.Init(); #endif // MEASURE_NODE_SIZE } else { codeGen = nullptr; } compJmpOpUsed = false; compLongUsed = false; compTailCallUsed = false; compTailPrefixSeen = false; compLocallocSeen = false; compLocallocUsed = false; compLocallocOptimized = false; compQmarkRationalized = false; compQmarkUsed = false; compFloatingPointUsed = false; compSuppressedZeroInit = false; compNeedsGSSecurityCookie = false; compGSReorderStackLayout = false; compGeneratingProlog = false; compGeneratingEpilog = false; compLSRADone = false; compRationalIRForm = false; #ifdef DEBUG compCodeGenDone = false; opts.compMinOptsIsUsed = false; #endif opts.compMinOptsIsSet = false; // Used by fgFindJumpTargets for inlining heuristics. opts.instrCount = 0; // Used to track when we should consider running EarlyProp optMethodFlags = 0; optNoReturnCallCount = 0; #ifdef DEBUG m_nodeTestData = nullptr; m_loopHoistCSEClass = FIRST_LOOP_HOIST_CSE_CLASS; #endif m_switchDescMap = nullptr; m_blockToEHPreds = nullptr; m_fieldSeqStore = nullptr; m_zeroOffsetFieldMap = nullptr; m_arrayInfoMap = nullptr; m_refAnyClass = nullptr; for (MemoryKind memoryKind : allMemoryKinds()) { m_memorySsaMap[memoryKind] = nullptr; } #ifdef DEBUG if (!compIsForInlining()) { compDoComponentUnitTestsOnce(); } #endif // DEBUG vnStore = nullptr; m_opAsgnVarDefSsaNums = nullptr; m_nodeToLoopMemoryBlockMap = nullptr; fgSsaPassesCompleted = 0; fgVNPassesCompleted = 0; // check that HelperCallProperties are initialized assert(s_helperCallProperties.IsPure(CORINFO_HELP_GETSHARED_GCSTATIC_BASE)); assert(!s_helperCallProperties.IsPure(CORINFO_HELP_GETFIELDOBJ)); // quick sanity check // We start with the flow graph in tree-order fgOrder = FGOrderTree; m_classLayoutTable = nullptr; #ifdef FEATURE_SIMD m_simdHandleCache = nullptr; #endif // FEATURE_SIMD compUsesThrowHelper = false; } /***************************************************************************** * * Destructor */ void Compiler::compDone() { } void* Compiler::compGetHelperFtn(CorInfoHelpFunc ftnNum, /* IN */ void** ppIndirection) /* OUT */ { void* addr; if (info.compMatchedVM) { addr = info.compCompHnd->getHelperFtn(ftnNum, ppIndirection); } else { // If we don't have a matched VM, we won't get valid results when asking for a helper function. 
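        // Return an easily recognizable placeholder address so any accidental use of it stands out.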
        addr = UlongToPtr(0xCA11CA11); // "callcall"
    }

    return addr;
}

unsigned Compiler::compGetTypeSize(CorInfoType cit, CORINFO_CLASS_HANDLE clsHnd)
{
    var_types sigType = genActualType(JITtype2varType(cit));
    unsigned  sigSize;
    sigSize = genTypeSize(sigType);
    if (cit == CORINFO_TYPE_VALUECLASS)
    {
        sigSize = info.compCompHnd->getClassSize(clsHnd);
    }
    else if (cit == CORINFO_TYPE_REFANY)
    {
        sigSize = 2 * TARGET_POINTER_SIZE;
    }
    return sigSize;
}

#ifdef DEBUG
static bool DidComponentUnitTests = false;

void Compiler::compDoComponentUnitTestsOnce()
{
    if (!JitConfig.RunComponentUnitTests())
    {
        return;
    }

    if (!DidComponentUnitTests)
    {
        DidComponentUnitTests = true;
        ValueNumStore::RunTests(this);
        BitSetSupport::TestSuite(getAllocatorDebugOnly());
    }
}

//------------------------------------------------------------------------
// compGetJitDefaultFill:
//
// Return Value:
//    An unsigned char value used to initialize memory allocated by the JIT.
//    The default value is taken from COMPLUS_JitDefaultFill; if it is not set
//    the value will be 0xdd. When JitStress is active a random value based
//    on the method hash is used.
//
// Notes:
//    Note that we can't use small values like zero, because we have some
//    asserts that can fire for such values.
//
// static
unsigned char Compiler::compGetJitDefaultFill(Compiler* comp)
{
    unsigned char defaultFill = (unsigned char)JitConfig.JitDefaultFill();

    if (comp != nullptr && comp->compStressCompile(STRESS_GENERIC_VARN, 50))
    {
        unsigned temp;
        temp = comp->info.compMethodHash();
        temp = (temp >> 16) ^ temp;
        temp = (temp >> 8) ^ temp;
        temp = temp & 0xff;

        // asserts like this: assert(!IsUninitialized(stkLvl));
        // mean that small values for defaultFill are problematic
        // so we make the value larger in that case.
        if (temp < 0x20)
        {
            temp |= 0x80;
        }

        // Make a misaligned pointer value to reduce probability of getting a valid value and firing
        // assert(!IsUninitialized(pointer)).
        temp |= 0x1;

        defaultFill = (unsigned char)temp;
    }

    return defaultFill;
}

#endif // DEBUG

/*****************************************************************************/
#ifdef DEBUG
/*****************************************************************************/

VarName Compiler::compVarName(regNumber reg, bool isFloatReg)
{
    if (isFloatReg)
    {
        assert(genIsValidFloatReg(reg));
    }
    else
    {
        assert(genIsValidReg(reg));
    }

    if ((info.compVarScopesCount > 0) && compCurBB && opts.varNames)
    {
        unsigned   lclNum;
        LclVarDsc* varDsc;

        /* Look for the matching register */
        for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
        {
            /* If the variable is not in a register, or not in the register we're looking for, quit. */
            /* Also, if it is a compiler generated variable (i.e. slot# > info.compVarScopesCount), don't bother.
*/ if ((varDsc->lvRegister != 0) && (varDsc->GetRegNum() == reg) && (varDsc->lvSlotNum < info.compVarScopesCount)) { /* check if variable in that register is live */ if (VarSetOps::IsMember(this, compCurLife, varDsc->lvVarIndex)) { /* variable is live - find the corresponding slot */ VarScopeDsc* varScope = compFindLocalVar(varDsc->lvSlotNum, compCurBB->bbCodeOffs, compCurBB->bbCodeOffsEnd); if (varScope) { return varScope->vsdName; } } } } } return nullptr; } const char* Compiler::compRegVarName(regNumber reg, bool displayVar, bool isFloatReg) { #ifdef TARGET_ARM isFloatReg = genIsValidFloatReg(reg); #endif if (displayVar && (reg != REG_NA)) { VarName varName = compVarName(reg, isFloatReg); if (varName) { const int NAME_VAR_REG_BUFFER_LEN = 4 + 256 + 1; static char nameVarReg[2][NAME_VAR_REG_BUFFER_LEN]; // to avoid overwriting the buffer when have 2 // consecutive calls before printing static int index = 0; // for circular index into the name array index = (index + 1) % 2; // circular reuse of index sprintf_s(nameVarReg[index], NAME_VAR_REG_BUFFER_LEN, "%s'%s'", getRegName(reg), VarNameToStr(varName)); return nameVarReg[index]; } } /* no debug info required or no variable in that register -> return standard name */ return getRegName(reg); } const char* Compiler::compRegNameForSize(regNumber reg, size_t size) { if (size == 0 || size >= 4) { return compRegVarName(reg, true); } // clang-format off static const char * sizeNames[][2] = { { "al", "ax" }, { "cl", "cx" }, { "dl", "dx" }, { "bl", "bx" }, #ifdef TARGET_AMD64 { "spl", "sp" }, // ESP { "bpl", "bp" }, // EBP { "sil", "si" }, // ESI { "dil", "di" }, // EDI { "r8b", "r8w" }, { "r9b", "r9w" }, { "r10b", "r10w" }, { "r11b", "r11w" }, { "r12b", "r12w" }, { "r13b", "r13w" }, { "r14b", "r14w" }, { "r15b", "r15w" }, #endif // TARGET_AMD64 }; // clang-format on assert(isByteReg(reg)); assert(genRegMask(reg) & RBM_BYTE_REGS); assert(size == 1 || size == 2); return sizeNames[reg][size - 1]; } const char* Compiler::compLocalVarName(unsigned varNum, unsigned offs) { unsigned i; VarScopeDsc* t; for (i = 0, t = info.compVarScopes; i < info.compVarScopesCount; i++, t++) { if (t->vsdVarNum != varNum) { continue; } if (offs >= t->vsdLifeBeg && offs < t->vsdLifeEnd) { return VarNameToStr(t->vsdName); } } return nullptr; } /*****************************************************************************/ #endif // DEBUG /*****************************************************************************/ void Compiler::compSetProcessor() { // // NOTE: This function needs to be kept in sync with EEJitManager::SetCpuInfo() in vm\codeman.cpp // const JitFlags& jitFlags = *opts.jitFlags; #if defined(TARGET_ARM) info.genCPU = CPU_ARM; #elif defined(TARGET_ARM64) info.genCPU = CPU_ARM64; #elif defined(TARGET_AMD64) info.genCPU = CPU_X64; #elif defined(TARGET_X86) if (jitFlags.IsSet(JitFlags::JIT_FLAG_TARGET_P4)) info.genCPU = CPU_X86_PENTIUM_4; else info.genCPU = CPU_X86; #endif // // Processor specific optimizations // CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_AMD64 opts.compUseCMOV = true; #elif defined(TARGET_X86) opts.compUseCMOV = jitFlags.IsSet(JitFlags::JIT_FLAG_USE_CMOV); #ifdef DEBUG if (opts.compUseCMOV) opts.compUseCMOV = !compStressCompile(STRESS_USE_CMOV, 50); #endif // DEBUG #endif // TARGET_X86 // The VM will set the ISA flags depending on actual hardware support // and any specified config switches specified by the user. The exception // here is for certain "artificial ISAs" such as Vector64/128/256 where they // don't actually exist. 
The JIT is in charge of adding those and ensuring // the total sum of flags is still valid. CORINFO_InstructionSetFlags instructionSetFlags = jitFlags.GetInstructionSetFlags(); opts.compSupportsISA = 0; opts.compSupportsISAReported = 0; opts.compSupportsISAExactly = 0; #if defined(TARGET_XARCH) instructionSetFlags.AddInstructionSet(InstructionSet_Vector128); instructionSetFlags.AddInstructionSet(InstructionSet_Vector256); #endif // TARGET_XARCH #if defined(TARGET_ARM64) instructionSetFlags.AddInstructionSet(InstructionSet_Vector64); instructionSetFlags.AddInstructionSet(InstructionSet_Vector128); #endif // TARGET_ARM64 instructionSetFlags = EnsureInstructionSetFlagsAreValid(instructionSetFlags); opts.setSupportedISAs(instructionSetFlags); #ifdef TARGET_XARCH if (!compIsForInlining()) { if (canUseVexEncoding()) { codeGen->GetEmitter()->SetUseVEXEncoding(true); // Assume each JITted method does not contain AVX instruction at first codeGen->GetEmitter()->SetContainsAVX(false); codeGen->GetEmitter()->SetContains256bitAVX(false); } } #endif // TARGET_XARCH } bool Compiler::notifyInstructionSetUsage(CORINFO_InstructionSet isa, bool supported) const { const char* isaString = InstructionSetToString(isa); JITDUMP("Notify VM instruction set (%s) %s be supported.\n", isaString, supported ? "must" : "must not"); return info.compCompHnd->notifyInstructionSetUsage(isa, supported); } #ifdef PROFILING_SUPPORTED // A Dummy routine to receive Enter/Leave/Tailcall profiler callbacks. // These are used when complus_JitEltHookEnabled=1 #ifdef TARGET_AMD64 void DummyProfilerELTStub(UINT_PTR ProfilerHandle, UINT_PTR callerSP) { return; } #else //! TARGET_AMD64 void DummyProfilerELTStub(UINT_PTR ProfilerHandle) { return; } #endif //! TARGET_AMD64 #endif // PROFILING_SUPPORTED bool Compiler::compShouldThrowOnNoway( #ifdef FEATURE_TRACELOGGING const char* filename, unsigned line #endif ) { #ifdef FEATURE_TRACELOGGING compJitTelemetry.NotifyNowayAssert(filename, line); #endif // In min opts, we don't want the noway assert to go through the exception // path. Instead we want it to just silently go through codegen for // compat reasons. return !opts.MinOpts(); } // ConfigInteger does not offer an option for decimal flags. Any numbers are interpreted as hex. // I could add the decimal option to ConfigInteger or I could write a function to reinterpret this // value as the user intended. unsigned ReinterpretHexAsDecimal(unsigned in) { // ex: in: 0x100 returns: 100 unsigned result = 0; unsigned index = 1; // default value if (in == INT_MAX) { return in; } while (in) { unsigned digit = in % 16; in >>= 4; assert(digit < 10); result += digit * index; index *= 10; } return result; } void Compiler::compInitOptions(JitFlags* jitFlags) { #ifdef UNIX_AMD64_ABI opts.compNeedToAlignFrame = false; #endif // UNIX_AMD64_ABI memset(&opts, 0, sizeof(opts)); if (compIsForInlining()) { // The following flags are lost when inlining. (They are removed in // Compiler::fgInvokeInlineeCompiler().) 
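        // Verify that none of those flags are still set for this inlinee compilation.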
assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)); assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_PROF_ENTERLEAVE)); assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_EnC)); assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE)); assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_TRACK_TRANSITIONS)); } opts.jitFlags = jitFlags; opts.compFlags = CLFLG_MAXOPT; // Default value is for full optimization if (jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_CODE) || jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT) || jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0)) { opts.compFlags = CLFLG_MINOPT; } // Don't optimize .cctors (except prejit) or if we're an inlinee else if (!jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && ((info.compFlags & FLG_CCTOR) == FLG_CCTOR) && !compIsForInlining()) { opts.compFlags = CLFLG_MINOPT; } // Default value is to generate a blend of size and speed optimizations // opts.compCodeOpt = BLENDED_CODE; // If the EE sets SIZE_OPT or if we are compiling a Class constructor // we will optimize for code size at the expense of speed // if (jitFlags->IsSet(JitFlags::JIT_FLAG_SIZE_OPT) || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR)) { opts.compCodeOpt = SMALL_CODE; } // // If the EE sets SPEED_OPT we will optimize for speed at the expense of code size // else if (jitFlags->IsSet(JitFlags::JIT_FLAG_SPEED_OPT) || (jitFlags->IsSet(JitFlags::JIT_FLAG_TIER1) && !jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT))) { opts.compCodeOpt = FAST_CODE; assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_SIZE_OPT)); } //------------------------------------------------------------------------- opts.compDbgCode = jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_CODE); opts.compDbgInfo = jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_INFO); opts.compDbgEnC = jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_EnC); #ifdef DEBUG opts.compJitAlignLoopAdaptive = JitConfig.JitAlignLoopAdaptive() == 1; opts.compJitAlignLoopBoundary = (unsigned short)JitConfig.JitAlignLoopBoundary(); opts.compJitAlignLoopMinBlockWeight = (unsigned short)JitConfig.JitAlignLoopMinBlockWeight(); opts.compJitAlignLoopForJcc = JitConfig.JitAlignLoopForJcc() == 1; opts.compJitAlignLoopMaxCodeSize = (unsigned short)JitConfig.JitAlignLoopMaxCodeSize(); opts.compJitHideAlignBehindJmp = JitConfig.JitHideAlignBehindJmp() == 1; opts.compJitOptimizeStructHiddenBuffer = JitConfig.JitOptimizeStructHiddenBuffer() == 1; #else opts.compJitAlignLoopAdaptive = true; opts.compJitAlignLoopBoundary = DEFAULT_ALIGN_LOOP_BOUNDARY; opts.compJitAlignLoopMinBlockWeight = DEFAULT_ALIGN_LOOP_MIN_BLOCK_WEIGHT; opts.compJitAlignLoopMaxCodeSize = DEFAULT_MAX_LOOPSIZE_FOR_ALIGN; opts.compJitHideAlignBehindJmp = true; opts.compJitOptimizeStructHiddenBuffer = true; #endif #ifdef TARGET_XARCH if (opts.compJitAlignLoopAdaptive) { // For adaptive alignment, padding limit is equal to the max instruction encoding // size which is 15 bytes. Hence (32 >> 1) - 1 = 15 bytes. opts.compJitAlignPaddingLimit = (opts.compJitAlignLoopBoundary >> 1) - 1; } else { // For non-adaptive alignment, padding limit is 1 less than the alignment boundary // specified. opts.compJitAlignPaddingLimit = opts.compJitAlignLoopBoundary - 1; } #elif TARGET_ARM64 if (opts.compJitAlignLoopAdaptive) { // For adaptive alignment, padding limit is same as specified by the alignment // boundary because all instructions are 4 bytes long. Hence (32 >> 1) = 16 bytes. opts.compJitAlignPaddingLimit = (opts.compJitAlignLoopBoundary >> 1); } else { // For non-adaptive, padding limit is same as specified by the alignment. 
opts.compJitAlignPaddingLimit = opts.compJitAlignLoopBoundary; } #endif assert(isPow2(opts.compJitAlignLoopBoundary)); #ifdef TARGET_ARM64 // The minimum encoding size for Arm64 is 4 bytes. assert(opts.compJitAlignLoopBoundary >= 4); #endif #if REGEN_SHORTCUTS || REGEN_CALLPAT // We never want to have debugging enabled when regenerating GC encoding patterns opts.compDbgCode = false; opts.compDbgInfo = false; opts.compDbgEnC = false; #endif compSetProcessor(); #ifdef DEBUG opts.dspOrder = false; // Optionally suppress inliner compiler instance dumping. // if (compIsForInlining()) { if (JitConfig.JitDumpInlinePhases() > 0) { verbose = impInlineInfo->InlinerCompiler->verbose; } else { verbose = false; } } else { verbose = false; codeGen->setVerbose(false); } verboseTrees = verbose && shouldUseVerboseTrees(); verboseSsa = verbose && shouldUseVerboseSsa(); asciiTrees = shouldDumpASCIITrees(); opts.dspDiffable = compIsForInlining() ? impInlineInfo->InlinerCompiler->opts.dspDiffable : false; #endif opts.altJit = false; #if defined(LATE_DISASM) && !defined(DEBUG) // For non-debug builds with the late disassembler built in, we currently always do late disassembly // (we have no way to determine when not to, since we don't have class/method names). // In the DEBUG case, this is initialized to false, below. opts.doLateDisasm = true; #endif #ifdef DEBUG const JitConfigValues::MethodSet* pfAltJit; if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { pfAltJit = &JitConfig.AltJitNgen(); } else { pfAltJit = &JitConfig.AltJit(); } if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALT_JIT)) { if (pfAltJit->contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.altJit = true; } unsigned altJitLimit = ReinterpretHexAsDecimal(JitConfig.AltJitLimit()); if (altJitLimit > 0 && Compiler::jitTotalMethodCompiled >= altJitLimit) { opts.altJit = false; } } #else // !DEBUG const char* altJitVal; if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { altJitVal = JitConfig.AltJitNgen().list(); } else { altJitVal = JitConfig.AltJit().list(); } if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALT_JIT)) { // In release mode, you either get all methods or no methods. You must use "*" as the parameter, or we ignore // it. You don't get to give a regular expression of methods to match. // (Partially, this is because we haven't computed and stored the method and class name except in debug, and it // might be expensive to do so.) if ((altJitVal != nullptr) && (strcmp(altJitVal, "*") == 0)) { opts.altJit = true; } } #endif // !DEBUG // Take care of COMPlus_AltJitExcludeAssemblies. if (opts.altJit) { // First, initialize the AltJitExcludeAssemblies list, but only do it once. if (!s_pAltJitExcludeAssembliesListInitialized) { const WCHAR* wszAltJitExcludeAssemblyList = JitConfig.AltJitExcludeAssemblies(); if (wszAltJitExcludeAssemblyList != nullptr) { // NOTE: The Assembly name list is allocated in the process heap, not in the no-release heap, which is // reclaimed // for every compilation. This is ok because we only allocate once, due to the static. s_pAltJitExcludeAssembliesList = new (HostAllocator::getHostAllocator()) AssemblyNamesList2(wszAltJitExcludeAssemblyList, HostAllocator::getHostAllocator()); } s_pAltJitExcludeAssembliesListInitialized = true; } if (s_pAltJitExcludeAssembliesList != nullptr) { // We have an exclusion list. See if this method is in an assembly that is on the list. 
// Note that we check this for every method, since we might inline across modules, and // if the inlinee module is on the list, we don't want to use the altjit for it. const char* methodAssemblyName = info.compCompHnd->getAssemblyName( info.compCompHnd->getModuleAssembly(info.compCompHnd->getClassModule(info.compClassHnd))); if (s_pAltJitExcludeAssembliesList->IsInList(methodAssemblyName)) { opts.altJit = false; } } } #ifdef DEBUG bool altJitConfig = !pfAltJit->isEmpty(); // If we have a non-empty AltJit config then we change all of these other // config values to refer only to the AltJit. Otherwise, a lot of COMPlus_* variables // would apply to both the altjit and the normal JIT, but we only care about // debugging the altjit if the COMPlus_AltJit configuration is set. // if (compIsForImportOnly() && (!altJitConfig || opts.altJit)) { if (JitConfig.JitImportBreak().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { assert(!"JitImportBreak reached"); } } bool verboseDump = false; if (!altJitConfig || opts.altJit) { // We should only enable 'verboseDump' when we are actually compiling a matching method // and not enable it when we are just considering inlining a matching method. // if (!compIsForInlining()) { if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { if (JitConfig.NgenDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { verboseDump = true; } unsigned ngenHashDumpVal = (unsigned)JitConfig.NgenHashDump(); if ((ngenHashDumpVal != (DWORD)-1) && (ngenHashDumpVal == info.compMethodHash())) { verboseDump = true; } } else { if (JitConfig.JitDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { verboseDump = true; } unsigned jitHashDumpVal = (unsigned)JitConfig.JitHashDump(); if ((jitHashDumpVal != (DWORD)-1) && (jitHashDumpVal == info.compMethodHash())) { verboseDump = true; } } } } // Optionally suppress dumping Tier0 jit requests. // if (verboseDump && jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0)) { verboseDump = (JitConfig.JitDumpTier0() > 0); } // Optionally suppress dumping except for a specific OSR jit request. // const int dumpAtOSROffset = JitConfig.JitDumpAtOSROffset(); if (verboseDump && (dumpAtOSROffset != -1)) { if (jitFlags->IsSet(JitFlags::JIT_FLAG_OSR)) { verboseDump = (((IL_OFFSET)dumpAtOSROffset) == info.compILEntry); } else { verboseDump = false; } } if (verboseDump) { verbose = true; } #endif // DEBUG #ifdef FEATURE_SIMD setUsesSIMDTypes(false); #endif // FEATURE_SIMD lvaEnregEHVars = (compEnregLocals() && JitConfig.EnableEHWriteThru()); lvaEnregMultiRegVars = (compEnregLocals() && JitConfig.EnableMultiRegLocals()); if (compIsForImportOnly()) { return; } #if FEATURE_TAILCALL_OPT // By default opportunistic tail call optimization is enabled. // Recognition is done in the importer so this must be set for // inlinees as well. opts.compTailCallOpt = true; #endif // FEATURE_TAILCALL_OPT #if FEATURE_FASTTAILCALL // By default fast tail calls are enabled. 
opts.compFastTailCalls = true; #endif // FEATURE_FASTTAILCALL // Profile data // fgPgoSchema = nullptr; fgPgoData = nullptr; fgPgoSchemaCount = 0; fgPgoQueryResult = E_FAIL; fgPgoFailReason = nullptr; fgPgoSource = ICorJitInfo::PgoSource::Unknown; if (jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT)) { fgPgoQueryResult = info.compCompHnd->getPgoInstrumentationResults(info.compMethodHnd, &fgPgoSchema, &fgPgoSchemaCount, &fgPgoData, &fgPgoSource); // a failed result that also has a non-NULL fgPgoSchema // indicates that the ILSize for the method no longer matches // the ILSize for the method when profile data was collected. // // We will discard the IBC data in this case // if (FAILED(fgPgoQueryResult)) { fgPgoFailReason = (fgPgoSchema != nullptr) ? "No matching PGO data" : "No PGO data"; fgPgoData = nullptr; fgPgoSchema = nullptr; } // Optionally, disable use of profile data. // else if (JitConfig.JitDisablePgo() > 0) { fgPgoFailReason = "PGO data available, but JitDisablePgo > 0"; fgPgoQueryResult = E_FAIL; fgPgoData = nullptr; fgPgoSchema = nullptr; fgPgoDisabled = true; } #ifdef DEBUG // Optionally, enable use of profile data for only some methods. // else { static ConfigMethodRange JitEnablePgoRange; JitEnablePgoRange.EnsureInit(JitConfig.JitEnablePgoRange()); // Base this decision on the root method hash, so a method either sees all available // profile data (including that for inlinees), or none of it. // const unsigned hash = impInlineRoot()->info.compMethodHash(); if (!JitEnablePgoRange.Contains(hash)) { fgPgoFailReason = "PGO data available, but method hash NOT within JitEnablePgoRange"; fgPgoQueryResult = E_FAIL; fgPgoData = nullptr; fgPgoSchema = nullptr; fgPgoDisabled = true; } } // A successful result implies a non-NULL fgPgoSchema // if (SUCCEEDED(fgPgoQueryResult)) { assert(fgPgoSchema != nullptr); } // A failed result implies a NULL fgPgoSchema // see implementation of Compiler::fgHaveProfileData() // if (FAILED(fgPgoQueryResult)) { assert(fgPgoSchema == nullptr); } #endif } if (compIsForInlining()) { return; } // The rest of the opts fields that we initialize here // should only be used when we generate code for the method // They should not be used when importing or inlining CLANG_FORMAT_COMMENT_ANCHOR; #if FEATURE_TAILCALL_OPT opts.compTailCallLoopOpt = true; #endif // FEATURE_TAILCALL_OPT opts.genFPorder = true; opts.genFPopt = true; opts.instrCount = 0; opts.lvRefCount = 0; #ifdef PROFILING_SUPPORTED opts.compJitELTHookEnabled = false; #endif // PROFILING_SUPPORTED #if defined(TARGET_ARM64) // 0 is default: use the appropriate frame type based on the function. opts.compJitSaveFpLrWithCalleeSavedRegisters = 0; #endif // defined(TARGET_ARM64) #ifdef DEBUG opts.dspInstrs = false; opts.dspLines = false; opts.varNames = false; opts.dmpHex = false; opts.disAsm = false; opts.disAsmSpilled = false; opts.disDiffable = false; opts.disAddr = false; opts.disAlignment = false; opts.dspCode = false; opts.dspEHTable = false; opts.dspDebugInfo = false; opts.dspGCtbls = false; opts.disAsm2 = false; opts.dspUnwind = false; opts.compLongAddress = false; opts.optRepeat = false; #ifdef LATE_DISASM opts.doLateDisasm = false; #endif // LATE_DISASM compDebugBreak = false; // If we have a non-empty AltJit config then we change all of these other // config values to refer only to the AltJit. 
// if (!altJitConfig || opts.altJit) { if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { if ((JitConfig.NgenOrder() & 1) == 1) { opts.dspOrder = true; } if (JitConfig.NgenGCDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspGCtbls = true; } if (JitConfig.NgenDisasm().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.disAsm = true; } if (JitConfig.NgenDisasm().contains("SPILLED", nullptr, nullptr)) { opts.disAsmSpilled = true; } if (JitConfig.NgenUnwindDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspUnwind = true; } if (JitConfig.NgenEHDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspEHTable = true; } if (JitConfig.NgenDebugDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspDebugInfo = true; } } else { bool disEnabled = true; // Setup assembly name list for disassembly, if not already set up. if (!s_pJitDisasmIncludeAssembliesListInitialized) { const WCHAR* assemblyNameList = JitConfig.JitDisasmAssemblies(); if (assemblyNameList != nullptr) { s_pJitDisasmIncludeAssembliesList = new (HostAllocator::getHostAllocator()) AssemblyNamesList2(assemblyNameList, HostAllocator::getHostAllocator()); } s_pJitDisasmIncludeAssembliesListInitialized = true; } // If we have an assembly name list for disassembly, also check this method's assembly. if (s_pJitDisasmIncludeAssembliesList != nullptr && !s_pJitDisasmIncludeAssembliesList->IsEmpty()) { const char* assemblyName = info.compCompHnd->getAssemblyName( info.compCompHnd->getModuleAssembly(info.compCompHnd->getClassModule(info.compClassHnd))); if (!s_pJitDisasmIncludeAssembliesList->IsInList(assemblyName)) { disEnabled = false; } } if (disEnabled) { if ((JitConfig.JitOrder() & 1) == 1) { opts.dspOrder = true; } if (JitConfig.JitGCDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspGCtbls = true; } if (JitConfig.JitDisasm().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.disAsm = true; } if (JitConfig.JitDisasm().contains("SPILLED", nullptr, nullptr)) { opts.disAsmSpilled = true; } if (JitConfig.JitUnwindDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspUnwind = true; } if (JitConfig.JitEHDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspEHTable = true; } if (JitConfig.JitDebugDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspDebugInfo = true; } } } if (opts.disAsm && JitConfig.JitDisasmWithGC()) { opts.disasmWithGC = true; } #ifdef LATE_DISASM if (JitConfig.JitLateDisasm().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) opts.doLateDisasm = true; #endif // LATE_DISASM // This one applies to both Ngen/Jit Disasm output: COMPlus_JitDiffableDasm=1 if (JitConfig.DiffableDasm() != 0) { opts.disDiffable = true; opts.dspDiffable = true; } // This one applies to both Ngen/Jit Disasm output: COMPlus_JitDasmWithAddress=1 if (JitConfig.JitDasmWithAddress() != 0) { opts.disAddr = true; } if (JitConfig.JitDasmWithAlignmentBoundaries() != 0) { opts.disAlignment = true; } if (JitConfig.JitLongAddress() != 0) { opts.compLongAddress = true; } if (JitConfig.JitOptRepeat().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.optRepeat = true; } } if (verboseDump) { opts.dspCode 
= true; opts.dspEHTable = true; opts.dspGCtbls = true; opts.disAsm2 = true; opts.dspUnwind = true; verbose = true; verboseTrees = shouldUseVerboseTrees(); verboseSsa = shouldUseVerboseSsa(); codeGen->setVerbose(true); } treesBeforeAfterMorph = (JitConfig.TreesBeforeAfterMorph() == 1); morphNum = 0; // Initialize the morphed-trees counting. expensiveDebugCheckLevel = JitConfig.JitExpensiveDebugCheckLevel(); if (expensiveDebugCheckLevel == 0) { // If we're in a stress mode that modifies the flowgraph, make 1 the default. if (fgStressBBProf() || compStressCompile(STRESS_DO_WHILE_LOOPS, 30)) { expensiveDebugCheckLevel = 1; } } if (verbose) { printf("****** START compiling %s (MethodHash=%08x)\n", info.compFullName, info.compMethodHash()); printf("Generating code for %s %s\n", Target::g_tgtPlatformName(), Target::g_tgtCPUName); printf(""); // in our logic this causes a flush } if (JitConfig.JitBreak().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { assert(!"JitBreak reached"); } unsigned jitHashBreakVal = (unsigned)JitConfig.JitHashBreak(); if ((jitHashBreakVal != (DWORD)-1) && (jitHashBreakVal == info.compMethodHash())) { assert(!"JitHashBreak reached"); } if (verbose || JitConfig.JitDebugBreak().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args) || JitConfig.JitBreak().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { compDebugBreak = true; } memset(compActiveStressModes, 0, sizeof(compActiveStressModes)); // Read function list, if not already read, and there exists such a list. if (!s_pJitFunctionFileInitialized) { const WCHAR* functionFileName = JitConfig.JitFunctionFile(); if (functionFileName != nullptr) { s_pJitMethodSet = new (HostAllocator::getHostAllocator()) MethodSet(functionFileName, HostAllocator::getHostAllocator()); } s_pJitFunctionFileInitialized = true; } #endif // DEBUG //------------------------------------------------------------------------- #ifdef DEBUG assert(!codeGen->isGCTypeFixed()); opts.compGcChecks = (JitConfig.JitGCChecks() != 0) || compStressCompile(STRESS_GENERIC_VARN, 5); #endif #if defined(DEBUG) && defined(TARGET_XARCH) enum { STACK_CHECK_ON_RETURN = 0x1, STACK_CHECK_ON_CALL = 0x2, STACK_CHECK_ALL = 0x3 }; DWORD dwJitStackChecks = JitConfig.JitStackChecks(); if (compStressCompile(STRESS_GENERIC_VARN, 5)) { dwJitStackChecks = STACK_CHECK_ALL; } opts.compStackCheckOnRet = (dwJitStackChecks & DWORD(STACK_CHECK_ON_RETURN)) != 0; #if defined(TARGET_X86) opts.compStackCheckOnCall = (dwJitStackChecks & DWORD(STACK_CHECK_ON_CALL)) != 0; #endif // defined(TARGET_X86) #endif // defined(DEBUG) && defined(TARGET_XARCH) #if MEASURE_MEM_ALLOC s_dspMemStats = (JitConfig.DisplayMemStats() != 0); #endif #ifdef PROFILING_SUPPORTED opts.compNoPInvokeInlineCB = jitFlags->IsSet(JitFlags::JIT_FLAG_PROF_NO_PINVOKE_INLINE); // Cache the profiler handle if (jitFlags->IsSet(JitFlags::JIT_FLAG_PROF_ENTERLEAVE)) { bool hookNeeded; bool indirected; info.compCompHnd->GetProfilingHandle(&hookNeeded, &compProfilerMethHnd, &indirected); compProfilerHookNeeded = !!hookNeeded; compProfilerMethHndIndirected = !!indirected; } else { compProfilerHookNeeded = false; compProfilerMethHnd = nullptr; compProfilerMethHndIndirected = false; } // Honour COMPlus_JitELTHookEnabled or STRESS_PROFILER_CALLBACKS stress mode // only if VM has not asked us to generate profiler hooks in the first place. // That is, override VM only if it hasn't asked for a profiler callback for this method. 
// Don't run this stress mode when pre-JITing, as we would need to emit a relocation // for the call to the fake ELT hook, which wouldn't make sense, as we can't store that // in the pre-JIT image. if (!compProfilerHookNeeded) { if ((JitConfig.JitELTHookEnabled() != 0) || (!jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && compStressCompile(STRESS_PROFILER_CALLBACKS, 5))) { opts.compJitELTHookEnabled = true; } } // TBD: Exclude PInvoke stubs if (opts.compJitELTHookEnabled) { compProfilerMethHnd = (void*)DummyProfilerELTStub; compProfilerMethHndIndirected = false; } #endif // PROFILING_SUPPORTED #if FEATURE_TAILCALL_OPT const WCHAR* strTailCallOpt = JitConfig.TailCallOpt(); if (strTailCallOpt != nullptr) { opts.compTailCallOpt = (UINT)_wtoi(strTailCallOpt) != 0; } if (JitConfig.TailCallLoopOpt() == 0) { opts.compTailCallLoopOpt = false; } #endif #if FEATURE_FASTTAILCALL if (JitConfig.FastTailCalls() == 0) { opts.compFastTailCalls = false; } #endif // FEATURE_FASTTAILCALL #ifdef CONFIGURABLE_ARM_ABI opts.compUseSoftFP = jitFlags->IsSet(JitFlags::JIT_FLAG_SOFTFP_ABI); unsigned int softFPConfig = opts.compUseSoftFP ? 2 : 1; unsigned int oldSoftFPConfig = InterlockedCompareExchange(&GlobalJitOptions::compUseSoftFPConfigured, softFPConfig, 0); if (oldSoftFPConfig != softFPConfig && oldSoftFPConfig != 0) { // There are no current scenarios where the abi can change during the lifetime of a process // that uses the JIT. If such a change occurs, either compFeatureHfa will need to change to a TLS static // or we will need to have some means to reset the flag safely. NO_WAY("SoftFP ABI setting changed during lifetime of process"); } GlobalJitOptions::compFeatureHfa = !opts.compUseSoftFP; #elif defined(ARM_SOFTFP) && defined(TARGET_ARM) // Armel is unconditionally enabled in the JIT. Verify that the VM side agrees. assert(jitFlags->IsSet(JitFlags::JIT_FLAG_SOFTFP_ABI)); #elif defined(TARGET_ARM) assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_SOFTFP_ABI)); #endif // CONFIGURABLE_ARM_ABI opts.compScopeInfo = opts.compDbgInfo; #ifdef LATE_DISASM codeGen->getDisAssembler().disOpenForLateDisAsm(info.compMethodName, info.compClassName, info.compMethodInfo->args.pSig); #endif //------------------------------------------------------------------------- opts.compReloc = jitFlags->IsSet(JitFlags::JIT_FLAG_RELOC); #ifdef DEBUG #if defined(TARGET_XARCH) // Whether encoding of absolute addr as PC-rel offset is enabled opts.compEnablePCRelAddr = (JitConfig.EnablePCRelAddr() != 0); #endif #endif // DEBUG opts.compProcedureSplitting = jitFlags->IsSet(JitFlags::JIT_FLAG_PROCSPLIT); #ifdef TARGET_ARM64 // TODO-ARM64-NYI: enable hot/cold splitting opts.compProcedureSplitting = false; #endif // TARGET_ARM64 #ifdef DEBUG opts.compProcedureSplittingEH = opts.compProcedureSplitting; #endif // DEBUG if (opts.compProcedureSplitting) { // Note that opts.compdbgCode is true under ngen for checked assemblies! opts.compProcedureSplitting = !opts.compDbgCode; #ifdef DEBUG // JitForceProcedureSplitting is used to force procedure splitting on checked assemblies. // This is useful for debugging on a checked build. Note that we still only do procedure // splitting in the zapper. if (JitConfig.JitForceProcedureSplitting().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.compProcedureSplitting = true; } // JitNoProcedureSplitting will always disable procedure splitting. 
if (JitConfig.JitNoProcedureSplitting().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.compProcedureSplitting = false; } // // JitNoProcedureSplittingEH will disable procedure splitting in functions with EH. if (JitConfig.JitNoProcedureSplittingEH().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.compProcedureSplittingEH = false; } #endif } #ifdef DEBUG // Now, set compMaxUncheckedOffsetForNullObject for STRESS_NULL_OBJECT_CHECK if (compStressCompile(STRESS_NULL_OBJECT_CHECK, 30)) { compMaxUncheckedOffsetForNullObject = (size_t)JitConfig.JitMaxUncheckedOffset(); if (verbose) { printf("STRESS_NULL_OBJECT_CHECK: compMaxUncheckedOffsetForNullObject=0x%X\n", compMaxUncheckedOffsetForNullObject); } } if (verbose) { // If we are compiling for a specific tier, make that very obvious in the output. // Note that we don't expect multiple TIER flags to be set at one time, but there // is nothing preventing that. if (jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0)) { printf("OPTIONS: Tier-0 compilation (set COMPlus_TieredCompilation=0 to disable)\n"); } if (jitFlags->IsSet(JitFlags::JIT_FLAG_TIER1)) { printf("OPTIONS: Tier-1 compilation\n"); } if (compSwitchedToOptimized) { printf("OPTIONS: Tier-0 compilation, switched to FullOpts\n"); } if (compSwitchedToMinOpts) { printf("OPTIONS: Tier-1/FullOpts compilation, switched to MinOpts\n"); } if (jitFlags->IsSet(JitFlags::JIT_FLAG_OSR)) { printf("OPTIONS: OSR variant with entry point 0x%x\n", info.compILEntry); } printf("OPTIONS: compCodeOpt = %s\n", (opts.compCodeOpt == BLENDED_CODE) ? "BLENDED_CODE" : (opts.compCodeOpt == SMALL_CODE) ? "SMALL_CODE" : (opts.compCodeOpt == FAST_CODE) ? "FAST_CODE" : "UNKNOWN_CODE"); printf("OPTIONS: compDbgCode = %s\n", dspBool(opts.compDbgCode)); printf("OPTIONS: compDbgInfo = %s\n", dspBool(opts.compDbgInfo)); printf("OPTIONS: compDbgEnC = %s\n", dspBool(opts.compDbgEnC)); printf("OPTIONS: compProcedureSplitting = %s\n", dspBool(opts.compProcedureSplitting)); printf("OPTIONS: compProcedureSplittingEH = %s\n", dspBool(opts.compProcedureSplittingEH)); if (jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT) && fgHaveProfileData()) { printf("OPTIONS: optimized using %s profile data\n", pgoSourceToString(fgPgoSource)); } if (fgPgoFailReason != nullptr) { printf("OPTIONS: %s\n", fgPgoFailReason); } if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { printf("OPTIONS: Jit invoked for ngen\n"); } } #endif #ifdef PROFILING_SUPPORTED #ifdef UNIX_AMD64_ABI if (compIsProfilerHookNeeded()) { opts.compNeedToAlignFrame = true; } #endif // UNIX_AMD64_ABI #endif #if defined(DEBUG) && defined(TARGET_ARM64) if ((s_pJitMethodSet == nullptr) || s_pJitMethodSet->IsActiveMethod(info.compFullName, info.compMethodHash())) { opts.compJitSaveFpLrWithCalleeSavedRegisters = JitConfig.JitSaveFpLrWithCalleeSavedRegisters(); } #endif // defined(DEBUG) && defined(TARGET_ARM64) } #ifdef DEBUG bool Compiler::compJitHaltMethod() { /* This method returns true when we use an INS_BREAKPOINT to allow us to step into the generated native code */ /* Note that this these two "Jit" environment variables also work for ngen images */ if (JitConfig.JitHalt().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { return true; } /* Use this Hash variant when there are a lot of method with the same name and different signatures */ unsigned fJitHashHaltVal = (unsigned)JitConfig.JitHashHalt(); if ((fJitHashHaltVal != (unsigned)-1) && (fJitHashHaltVal == info.compMethodHash())) { return 
true; } return false; } /***************************************************************************** * Should we use a "stress-mode" for the given stressArea. We have different * areas to allow the areas to be mixed in different combinations in * different methods. * 'weight' indicates how often (as a percentage) the area should be stressed. * It should reflect the usefulness:overhead ratio. */ const LPCWSTR Compiler::s_compStressModeNames[STRESS_COUNT + 1] = { #define STRESS_MODE(mode) W("STRESS_") W(#mode), STRESS_MODES #undef STRESS_MODE }; //------------------------------------------------------------------------ // compStressCompile: determine if a stress mode should be enabled // // Arguments: // stressArea - stress mode to possibly enable // weight - percent of time this mode should be turned on // (range 0 to 100); weight 0 effectively disables // // Returns: // true if this stress mode is enabled // // Notes: // Methods may be excluded from stress via name or hash. // // Particular stress modes may be disabled or forcibly enabled. // // With JitStress=2, some stress modes are enabled regardless of weight; // these modes are the ones after COUNT_VARN in the enumeration. // // For other modes or for nonzero JitStress values, stress will be // enabled selectively for roughly weight% of methods. // bool Compiler::compStressCompile(compStressArea stressArea, unsigned weight) { // This can be called early, before info is fully set up. if ((info.compMethodName == nullptr) || (info.compFullName == nullptr)) { return false; } // Inlinees defer to the root method for stress, so that we can // more easily isolate methods that cause stress failures. if (compIsForInlining()) { return impInlineRoot()->compStressCompile(stressArea, weight); } const bool doStress = compStressCompileHelper(stressArea, weight); if (doStress && !compActiveStressModes[stressArea]) { if (verbose) { printf("\n\n*** JitStress: %ws ***\n\n", s_compStressModeNames[stressArea]); } compActiveStressModes[stressArea] = 1; } return doStress; } //------------------------------------------------------------------------ // compStressCompileHelper: helper to determine if a stress mode should be enabled // // Arguments: // stressArea - stress mode to possibly enable // weight - percent of time this mode should be turned on // (range 0 to 100); weight 0 effectively disables // // Returns: // true if this stress mode is enabled // // Notes: // See compStressCompile // bool Compiler::compStressCompileHelper(compStressArea stressArea, unsigned weight) { if (!bRangeAllowStress) { return false; } if (!JitConfig.JitStressOnly().isEmpty() && !JitConfig.JitStressOnly().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { return false; } // Does user explicitly prevent using this STRESS_MODE through the command line? const WCHAR* strStressModeNamesNot = JitConfig.JitStressModeNamesNot(); if ((strStressModeNamesNot != nullptr) && (wcsstr(strStressModeNamesNot, s_compStressModeNames[stressArea]) != nullptr)) { return false; } // Does user explicitly set this STRESS_MODE through the command line? const WCHAR* strStressModeNames = JitConfig.JitStressModeNames(); if (strStressModeNames != nullptr) { if (wcsstr(strStressModeNames, s_compStressModeNames[stressArea]) != nullptr) { return true; } // This stress mode name did not match anything in the stress // mode allowlist. If user has requested only enable mode, // don't allow this stress mode to turn on. 
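// (When JitStressModeNamesOnly is nonzero, stress is restricted to the modes explicitly listed in JitStressModeNames.)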
const bool onlyEnableMode = JitConfig.JitStressModeNamesOnly() != 0; if (onlyEnableMode) { return false; } } // 0: No stress (Except when explicitly set in complus_JitStressModeNames) // !=2: Vary stress. Performance will be slightly/moderately degraded // 2: Check-all stress. Performance will be REALLY horrible const int stressLevel = getJitStressLevel(); assert(weight <= MAX_STRESS_WEIGHT); // Check for boundary conditions if (stressLevel == 0 || weight == 0) { return false; } // Should we allow unlimited stress ? if ((stressArea > STRESS_COUNT_VARN) && (stressLevel == 2)) { return true; } if (weight == MAX_STRESS_WEIGHT) { return true; } // Get a hash which can be compared with 'weight' assert(stressArea != 0); const unsigned hash = (info.compMethodHash() ^ stressArea ^ stressLevel) % MAX_STRESS_WEIGHT; assert(hash < MAX_STRESS_WEIGHT && weight <= MAX_STRESS_WEIGHT); return (hash < weight); } //------------------------------------------------------------------------ // compPromoteFewerStructs: helper to determine if the local // should not be promoted under a stress mode. // // Arguments: // lclNum - local number to test // // Returns: // true if this local should not be promoted. // // Notes: // Reject ~50% of the potential promotions if STRESS_PROMOTE_FEWER_STRUCTS is active. // bool Compiler::compPromoteFewerStructs(unsigned lclNum) { bool rejectThisPromo = false; const bool promoteLess = compStressCompile(STRESS_PROMOTE_FEWER_STRUCTS, 50); if (promoteLess) { rejectThisPromo = (((info.compMethodHash() ^ lclNum) & 1) == 0); } return rejectThisPromo; } #endif // DEBUG void Compiler::compInitDebuggingInfo() { #ifdef DEBUG if (verbose) { printf("*************** In compInitDebuggingInfo() for %s\n", info.compFullName); } #endif /*------------------------------------------------------------------------- * * Get hold of the local variable records, if there are any */ info.compVarScopesCount = 0; if (opts.compScopeInfo) { eeGetVars(); } compInitVarScopeMap(); if (opts.compScopeInfo || opts.compDbgCode) { compInitScopeLists(); } if (opts.compDbgCode && (info.compVarScopesCount > 0)) { /* Create a new empty basic block. fgExtendDbgLifetimes() may add initialization of variables which are in scope right from the start of the (real) first BB (and therefore artificially marked as alive) into this block. */ fgEnsureFirstBBisScratch(); fgNewStmtAtEnd(fgFirstBB, gtNewNothingNode()); JITDUMP("Debuggable code - Add new %s to perform initialization of variables\n", fgFirstBB->dspToString()); } /*------------------------------------------------------------------------- * * Read the stmt-offsets table and the line-number table */ info.compStmtOffsetsImplicit = ICorDebugInfo::NO_BOUNDARIES; // We can only report debug info for EnC at places where the stack is empty. // Actually, at places where there are not live temps. Else, we won't be able // to map between the old and the new versions correctly as we won't have // any info for the live temps. 
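// Hence the assert below: under EnC with debug info, no implicit boundary kinds other than STACK_EMPTY_BOUNDARIES may be in effect.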
assert(!opts.compDbgEnC || !opts.compDbgInfo || 0 == (info.compStmtOffsetsImplicit & ~ICorDebugInfo::STACK_EMPTY_BOUNDARIES)); info.compStmtOffsetsCount = 0; if (opts.compDbgInfo) { /* Get hold of the line# records, if there are any */ eeGetStmtOffsets(); #ifdef DEBUG if (verbose) { printf("info.compStmtOffsetsCount = %d\n", info.compStmtOffsetsCount); printf("info.compStmtOffsetsImplicit = %04Xh", info.compStmtOffsetsImplicit); if (info.compStmtOffsetsImplicit) { printf(" ( "); if (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) { printf("STACK_EMPTY "); } if (info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) { printf("NOP "); } if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) { printf("CALL_SITE "); } printf(")"); } printf("\n"); IL_OFFSET* pOffs = info.compStmtOffsets; for (unsigned i = 0; i < info.compStmtOffsetsCount; i++, pOffs++) { printf("%02d) IL_%04Xh\n", i, *pOffs); } } #endif } } void Compiler::compSetOptimizationLevel() { bool theMinOptsValue; #pragma warning(suppress : 4101) unsigned jitMinOpts; if (compIsForInlining()) { theMinOptsValue = impInlineInfo->InlinerCompiler->opts.MinOpts(); goto _SetMinOpts; } theMinOptsValue = false; if (opts.compFlags == CLFLG_MINOPT) { JITLOG((LL_INFO100, "CLFLG_MINOPT set for method %s\n", info.compFullName)); theMinOptsValue = true; } #ifdef DEBUG jitMinOpts = JitConfig.JitMinOpts(); if (!theMinOptsValue && (jitMinOpts > 0)) { // jitTotalMethodCompiled does not include the method that is being compiled now, so make +1. unsigned methodCount = Compiler::jitTotalMethodCompiled + 1; unsigned methodCountMask = methodCount & 0xFFF; unsigned kind = (jitMinOpts & 0xF000000) >> 24; switch (kind) { default: if (jitMinOpts <= methodCount) { if (verbose) { printf(" Optimizations disabled by JitMinOpts and methodCount\n"); } theMinOptsValue = true; } break; case 0xD: { unsigned firstMinopts = (jitMinOpts >> 12) & 0xFFF; unsigned secondMinopts = (jitMinOpts >> 0) & 0xFFF; if ((firstMinopts == methodCountMask) || (secondMinopts == methodCountMask)) { if (verbose) { printf("0xD: Optimizations disabled by JitMinOpts and methodCountMask\n"); } theMinOptsValue = true; } } break; case 0xE: { unsigned startMinopts = (jitMinOpts >> 12) & 0xFFF; unsigned endMinopts = (jitMinOpts >> 0) & 0xFFF; if ((startMinopts <= methodCountMask) && (endMinopts >= methodCountMask)) { if (verbose) { printf("0xE: Optimizations disabled by JitMinOpts and methodCountMask\n"); } theMinOptsValue = true; } } break; case 0xF: { unsigned bitsZero = (jitMinOpts >> 12) & 0xFFF; unsigned bitsOne = (jitMinOpts >> 0) & 0xFFF; if (((methodCountMask & bitsOne) == bitsOne) && ((~methodCountMask & bitsZero) == bitsZero)) { if (verbose) { printf("0xF: Optimizations disabled by JitMinOpts and methodCountMask\n"); } theMinOptsValue = true; } } break; } } if (!theMinOptsValue) { if (JitConfig.JitMinOptsName().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { theMinOptsValue = true; } } #if 0 // The code in this #if can be used to debug optimization issues according to method hash. // To use, uncomment, rebuild and set environment variables minoptshashlo and minoptshashhi. 
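// For example (illustrative only; values are parsed as hex): with this block enabled, setting minoptshashlo=0 and minoptshashhi=ffffffff forces MinOpts for every method, while setting both to a single method's hash isolates just that method.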
#ifdef DEBUG unsigned methHash = info.compMethodHash(); char* lostr = getenv("minoptshashlo"); unsigned methHashLo = 0; if (lostr != nullptr) { sscanf_s(lostr, "%x", &methHashLo); char* histr = getenv("minoptshashhi"); unsigned methHashHi = UINT32_MAX; if (histr != nullptr) { sscanf_s(histr, "%x", &methHashHi); if (methHash >= methHashLo && methHash <= methHashHi) { printf("MinOpts for method %s, hash = %08x.\n", info.compFullName, methHash); printf(""); // in our logic this causes a flush theMinOptsValue = true; } } } #endif #endif if (compStressCompile(STRESS_MIN_OPTS, 5)) { theMinOptsValue = true; } // For PREJIT we never drop down to MinOpts // unless CLFLG_MINOPT is set else if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { if ((unsigned)JitConfig.JitMinOptsCodeSize() < info.compILCodeSize) { JITLOG((LL_INFO10, "IL Code Size exceeded, using MinOpts for method %s\n", info.compFullName)); theMinOptsValue = true; } else if ((unsigned)JitConfig.JitMinOptsInstrCount() < opts.instrCount) { JITLOG((LL_INFO10, "IL instruction count exceeded, using MinOpts for method %s\n", info.compFullName)); theMinOptsValue = true; } else if ((unsigned)JitConfig.JitMinOptsBbCount() < fgBBcount) { JITLOG((LL_INFO10, "Basic Block count exceeded, using MinOpts for method %s\n", info.compFullName)); theMinOptsValue = true; } else if ((unsigned)JitConfig.JitMinOptsLvNumCount() < lvaCount) { JITLOG((LL_INFO10, "Local Variable Num count exceeded, using MinOpts for method %s\n", info.compFullName)); theMinOptsValue = true; } else if ((unsigned)JitConfig.JitMinOptsLvRefCount() < opts.lvRefCount) { JITLOG((LL_INFO10, "Local Variable Ref count exceeded, using MinOpts for method %s\n", info.compFullName)); theMinOptsValue = true; } if (theMinOptsValue == true) { JITLOG((LL_INFO10000, "IL Code Size,Instr %4d,%4d, Basic Block count %3d, Local Variable Num,Ref count " "%3d,%3d for method %s\n", info.compILCodeSize, opts.instrCount, fgBBcount, lvaCount, opts.lvRefCount, info.compFullName)); if (JitConfig.JitBreakOnMinOpts() != 0) { assert(!"MinOpts enabled"); } } } #else // !DEBUG // Retail check if we should force Minopts due to the complexity of the method // For PREJIT we never drop down to MinOpts // unless CLFLG_MINOPT is set if (!theMinOptsValue && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && ((DEFAULT_MIN_OPTS_CODE_SIZE < info.compILCodeSize) || (DEFAULT_MIN_OPTS_INSTR_COUNT < opts.instrCount) || (DEFAULT_MIN_OPTS_BB_COUNT < fgBBcount) || (DEFAULT_MIN_OPTS_LV_NUM_COUNT < lvaCount) || (DEFAULT_MIN_OPTS_LV_REF_COUNT < opts.lvRefCount))) { theMinOptsValue = true; } #endif // DEBUG JITLOG((LL_INFO10000, "IL Code Size,Instr %4d,%4d, Basic Block count %3d, Local Variable Num,Ref count %3d,%3d for method %s\n", info.compILCodeSize, opts.instrCount, fgBBcount, lvaCount, opts.lvRefCount, info.compFullName)); #if 0 // The code in this #if has been useful in debugging loop cloning issues, by // enabling the loop cloning optimization selectively according to // method hash. #ifdef DEBUG if (!theMinOptsValue) { unsigned methHash = info.compMethodHash(); char* lostr = getenv("opthashlo"); unsigned methHashLo = 0; if (lostr != NULL) { sscanf_s(lostr, "%x", &methHashLo); // methHashLo = (unsigned(atoi(lostr)) << 2); // So we don't have to use negative numbers. } char* histr = getenv("opthashhi"); unsigned methHashHi = UINT32_MAX; if (histr != NULL) { sscanf_s(histr, "%x", &methHashHi); // methHashHi = (unsigned(atoi(histr)) << 2); // So we don't have to use negative numbers.
} if (methHash < methHashLo || methHash > methHashHi) { theMinOptsValue = true; } else { printf("Doing optimization in %s (0x%x).\n", info.compFullName, methHash); } } #endif #endif _SetMinOpts: // Set the MinOpts value opts.SetMinOpts(theMinOptsValue); // Notify the VM if MinOpts is being used when not requested if (theMinOptsValue && !compIsForInlining() && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT) && !opts.compDbgCode) { info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_SWITCHED_TO_MIN_OPT); opts.jitFlags->Clear(JitFlags::JIT_FLAG_TIER1); compSwitchedToMinOpts = true; } #ifdef DEBUG if (verbose && !compIsForInlining()) { printf("OPTIONS: opts.MinOpts() == %s\n", opts.MinOpts() ? "true" : "false"); } #endif /* Control the optimizations */ if (opts.OptimizationDisabled()) { opts.compFlags &= ~CLFLG_MAXOPT; opts.compFlags |= CLFLG_MINOPT; } if (!compIsForInlining()) { codeGen->setFramePointerRequired(false); codeGen->setFrameRequired(false); if (opts.OptimizationDisabled()) { codeGen->setFrameRequired(true); } #if !defined(TARGET_AMD64) // The VM sets JitFlags::JIT_FLAG_FRAMED for two reasons: (1) the COMPlus_JitFramed variable is set, or // (2) the function is marked "noinline". The reason for #2 is that people mark functions // noinline to ensure they show up in a stack walk. But for AMD64, we don't need a frame // pointer for the frame to show up in a stack walk. if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_FRAMED)) codeGen->setFrameRequired(true); #endif if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { // The JIT doesn't currently support loop alignment for prejitted images. // (The JIT doesn't know the final address of the code, hence // it can't align code based on unknown addresses.) codeGen->SetAlignLoops(false); // loop alignment not supported for prejitted code } else { codeGen->SetAlignLoops(JitConfig.JitAlignLoops() == 1); } } #if TARGET_ARM // A single JitStress=1 Linux ARM32 test fails when we expand virtual calls early // JIT\HardwareIntrinsics\General\Vector128_1\Vector128_1_ro // opts.compExpandCallsEarly = (JitConfig.JitExpandCallsEarly() == 2); #else opts.compExpandCallsEarly = (JitConfig.JitExpandCallsEarly() != 0); #endif fgCanRelocateEHRegions = true; } #ifdef TARGET_ARMARCH // Function compRsvdRegCheck: // given a curState to use for calculating the total frame size // it will return true if the REG_OPT_RSVD should be reserved so // that it can be used to form large offsets when accessing stack // based LclVars, including both incoming and outgoing argument areas. // // The method advances the frame layout state to curState by calling // lvaFrameSize(curState). // bool Compiler::compRsvdRegCheck(FrameLayoutState curState) { // Always do the layout even if returning early. Callers might // depend on us to do the layout. unsigned frameSize = lvaFrameSize(curState); JITDUMP("\n" "compRsvdRegCheck\n" " frame size = %6d\n" " compArgSize = %6d\n", frameSize, compArgSize); if (opts.MinOpts()) { // Have a recovery path in case we fail to reserve REG_OPT_RSVD and go // over the limit of SP and FP offset ranges due to large // temps. JITDUMP(" Returning true (MinOpts)\n\n"); return true; } unsigned calleeSavedRegMaxSz = CALLEE_SAVED_REG_MAXSZ; if (compFloatingPointUsed) { calleeSavedRegMaxSz += CALLEE_SAVED_FLOAT_MAXSZ; } calleeSavedRegMaxSz += REGSIZE_BYTES; // we always push LR.
See genPushCalleeSavedRegisters noway_assert(frameSize >= calleeSavedRegMaxSz); #if defined(TARGET_ARM64) // TODO-ARM64-CQ: update this! JITDUMP(" Returning true (ARM64)\n\n"); return true; // just always assume we'll need it, for now #else // TARGET_ARM // frame layout: // // ... high addresses ... // frame contents size // ------------------- ------------------------ // inArgs compArgSize (includes prespill) // caller SP ---> // prespill // LR REGSIZE_BYTES // R11 ---> R11 REGSIZE_BYTES // callee saved regs CALLEE_SAVED_REG_MAXSZ (32 bytes) // optional saved fp regs CALLEE_SAVED_FLOAT_MAXSZ (64 bytes) // lclSize // incl. TEMPS MAX_SPILL_TEMP_SIZE // incl. outArgs // SP ---> // ... low addresses ... // // When codeGen->isFramePointerRequired is true, R11 will be established as a frame pointer. // We can then use R11 to access incoming args with positive offsets, and LclVars with // negative offsets. // // In functions with EH, in the non-funclet (or main) region, even though we will have a // frame pointer, we can use SP with positive offsets to access any or all locals or arguments // that we can reach with SP-relative encodings. The funclet region might require the reserved // register, since it must use offsets from R11 to access the parent frame. unsigned maxR11PositiveEncodingOffset = compFloatingPointUsed ? 0x03FC : 0x0FFF; JITDUMP(" maxR11PositiveEncodingOffset = %6d\n", maxR11PositiveEncodingOffset); // Floating point load/store instructions (VLDR/VSTR) can address up to -0x3FC from R11, but we // don't know if there are either no integer locals, or if we don't need large negative offsets // for the integer locals, so we must use the integer max negative offset, which is a // smaller (absolute value) number. unsigned maxR11NegativeEncodingOffset = 0x00FF; // This is a negative offset from R11. JITDUMP(" maxR11NegativeEncodingOffset = %6d\n", maxR11NegativeEncodingOffset); // -1 because otherwise we are computing the address just beyond the last argument, which we don't need to do. unsigned maxR11PositiveOffset = compArgSize + (2 * REGSIZE_BYTES) - 1; JITDUMP(" maxR11PositiveOffset = %6d\n", maxR11PositiveOffset); // The value is positive, but represents a negative offset from R11. // frameSize includes callee-saved space for R11 and LR, which are at non-negative offsets from R11 // (+0 and +4, respectively), so don't include those in the max possible negative offset. assert(frameSize >= (2 * REGSIZE_BYTES)); unsigned maxR11NegativeOffset = frameSize - (2 * REGSIZE_BYTES); JITDUMP(" maxR11NegativeOffset = %6d\n", maxR11NegativeOffset); if (codeGen->isFramePointerRequired()) { if (maxR11NegativeOffset > maxR11NegativeEncodingOffset) { JITDUMP(" Returning true (frame required and maxR11NegativeOffset)\n\n"); return true; } if (maxR11PositiveOffset > maxR11PositiveEncodingOffset) { JITDUMP(" Returning true (frame required and maxR11PositiveOffset)\n\n"); return true; } } // Now consider the SP based frame case. Note that we will use SP based offsets to access the stack in R11 based // frames in the non-funclet main code area. unsigned maxSPPositiveEncodingOffset = compFloatingPointUsed ? 0x03FC : 0x0FFF; JITDUMP(" maxSPPositiveEncodingOffset = %6d\n", maxSPPositiveEncodingOffset); // -1 because otherwise we are computing the address just beyond the last argument, which we don't need to do. 
assert(compArgSize + frameSize > 0); unsigned maxSPPositiveOffset = compArgSize + frameSize - 1; if (codeGen->isFramePointerUsed()) { // We have a frame pointer, so we can use it to access part of the stack, even if SP can't reach those parts. // We will still generate SP-relative offsets if SP can reach. // First, check that the stack between R11 and SP can be fully reached, either via negative offset from FP // or positive offset from SP. Don't count stored R11 or LR, which are reached from positive offsets from FP. unsigned maxSPLocalsCombinedOffset = frameSize - (2 * REGSIZE_BYTES) - 1; JITDUMP(" maxSPLocalsCombinedOffset = %6d\n", maxSPLocalsCombinedOffset); if (maxSPLocalsCombinedOffset > maxSPPositiveEncodingOffset) { // Can R11 help? unsigned maxRemainingLocalsCombinedOffset = maxSPLocalsCombinedOffset - maxSPPositiveEncodingOffset; JITDUMP(" maxRemainingLocalsCombinedOffset = %6d\n", maxRemainingLocalsCombinedOffset); if (maxRemainingLocalsCombinedOffset > maxR11NegativeEncodingOffset) { JITDUMP(" Returning true (frame pointer exists; R11 and SP can't reach entire stack between them)\n\n"); return true; } // Otherwise, yes, we can address the remaining parts of the locals frame with negative offsets from R11. } // Check whether either R11 or SP can access the arguments. if ((maxR11PositiveOffset > maxR11PositiveEncodingOffset) && (maxSPPositiveOffset > maxSPPositiveEncodingOffset)) { JITDUMP(" Returning true (frame pointer exists; R11 and SP can't reach all arguments)\n\n"); return true; } } else { if (maxSPPositiveOffset > maxSPPositiveEncodingOffset) { JITDUMP(" Returning true (no frame pointer exists; SP can't reach all of frame)\n\n"); return true; } } // We won't need to reserve REG_OPT_RSVD. // JITDUMP(" Returning false\n\n"); return false; #endif // TARGET_ARM } #endif // TARGET_ARMARCH //------------------------------------------------------------------------ // compGetTieringName: get a string describing tiered compilation settings // for this method // // Arguments: // wantShortName - true if a short name is ok (say for using in file names) // // Returns: // String describing tiering decisions for this method, including cases // where the jit codegen will differ from what the runtime requested. // const char* Compiler::compGetTieringName(bool wantShortName) const { const bool tier0 = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0); const bool tier1 = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER1); if (!opts.compMinOptsIsSet) { // If 'compMinOptsIsSet' is not set, just return here. Otherwise, if this method is called // by the assertAbort(), we would recursively call assert while trying to get MinOpts() // and eventually stackoverflow. return "Optimization-Level-Not-Yet-Set"; } assert(!tier0 || !tier1); // We don't expect multiple TIER flags to be set at one time. if (tier0) { return "Tier0"; } else if (tier1) { if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_OSR)) { return "Tier1-OSR"; } else { return "Tier1"; } } else if (opts.OptimizationEnabled()) { if (compSwitchedToOptimized) { return wantShortName ? "Tier0-FullOpts" : "Tier-0 switched to FullOpts"; } else { return "FullOpts"; } } else if (opts.MinOpts()) { if (compSwitchedToMinOpts) { if (compSwitchedToOptimized) { return wantShortName ? "Tier0-FullOpts-MinOpts" : "Tier-0 switched to FullOpts, then to MinOpts"; } else { return wantShortName ? "Tier0-MinOpts" : "Tier-0 switched MinOpts"; } } else { return "MinOpts"; } } else if (opts.compDbgCode) { return "Debug"; } else { return wantShortName ? 
"Unknown" : "Unknown optimization level"; } } //------------------------------------------------------------------------ // compGetStressMessage: get a string describing jitstress capability // for this method // // Returns: // An empty string if stress is not enabled, else a string describing // if this method is subject to stress or is excluded by name or hash. // const char* Compiler::compGetStressMessage() const { // Add note about stress where appropriate const char* stressMessage = ""; #ifdef DEBUG // Is stress enabled via mode name or level? if ((JitConfig.JitStressModeNames() != nullptr) || (getJitStressLevel() > 0)) { // Is the method being jitted excluded from stress via range? if (bRangeAllowStress) { // Or is it excluded via name? if (!JitConfig.JitStressOnly().isEmpty() || !JitConfig.JitStressOnly().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { // Not excluded -- stress can happen stressMessage = " JitStress"; } else { stressMessage = " NoJitStress(Only)"; } } else { stressMessage = " NoJitStress(Range)"; } } #endif // DEBUG return stressMessage; } void Compiler::compFunctionTraceStart() { #ifdef DEBUG if (compIsForInlining()) { return; } if ((JitConfig.JitFunctionTrace() != 0) && !opts.disDiffable) { LONG newJitNestingLevel = InterlockedIncrement(&Compiler::jitNestingLevel); if (newJitNestingLevel <= 0) { printf("{ Illegal nesting level %d }\n", newJitNestingLevel); } for (LONG i = 0; i < newJitNestingLevel - 1; i++) { printf(" "); } printf("{ Start Jitting Method %4d %s (MethodHash=%08x) %s\n", Compiler::jitTotalMethodCompiled, info.compFullName, info.compMethodHash(), compGetTieringName()); /* } editor brace matching workaround for this printf */ } #endif // DEBUG } void Compiler::compFunctionTraceEnd(void* methodCodePtr, ULONG methodCodeSize, bool isNYI) { #ifdef DEBUG assert(!compIsForInlining()); if ((JitConfig.JitFunctionTrace() != 0) && !opts.disDiffable) { LONG newJitNestingLevel = InterlockedDecrement(&Compiler::jitNestingLevel); if (newJitNestingLevel < 0) { printf("{ Illegal nesting level %d }\n", newJitNestingLevel); } for (LONG i = 0; i < newJitNestingLevel; i++) { printf(" "); } // Note: that is incorrect if we are compiling several methods at the same time. unsigned methodNumber = Compiler::jitTotalMethodCompiled - 1; /* { editor brace-matching workaround for following printf */ printf("} Jitted Method %4d at" FMT_ADDR "method %s size %08x%s%s\n", methodNumber, DBG_ADDR(methodCodePtr), info.compFullName, methodCodeSize, isNYI ? " NYI" : (compIsForImportOnly() ? " import only" : ""), opts.altJit ? 
" altjit" : ""); } #endif // DEBUG } //------------------------------------------------------------------------ // BeginPhase: begin execution of a phase // // Arguments: // phase - the phase that is about to begin // void Compiler::BeginPhase(Phases phase) { mostRecentlyActivePhase = phase; } //------------------------------------------------------------------------ // EndPhase: finish execution of a phase // // Arguments: // phase - the phase that has just finished // void Compiler::EndPhase(Phases phase) { #if defined(FEATURE_JIT_METHOD_PERF) if (pCompJitTimer != nullptr) { pCompJitTimer->EndPhase(this, phase); } #endif mostRecentlyActivePhase = phase; } //------------------------------------------------------------------------ // compCompile: run phases needed for compilation // // Arguments: // methodCodePtr [OUT] - address of generated code // methodCodeSize [OUT] - size of the generated code (hot + cold setions) // compileFlags [IN] - flags controlling jit behavior // // Notes: // This is the most interesting 'toplevel' function in the JIT. It goes through the operations of // importing, morphing, optimizations and code generation. This is called from the EE through the // code:CILJit::compileMethod function. // // For an overview of the structure of the JIT, see: // https://github.com/dotnet/runtime/blob/main/docs/design/coreclr/jit/ryujit-overview.md // // Also called for inlinees, though they will only be run through the first few phases. // void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags) { // Prepare for importation // auto preImportPhase = [this]() { if (compIsForInlining()) { // Notify root instance that an inline attempt is about to import IL impInlineRoot()->m_inlineStrategy->NoteImport(); } hashBv::Init(this); VarSetOps::AssignAllowUninitRhs(this, compCurLife, VarSetOps::UninitVal()); // The temp holding the secret stub argument is used by fgImport() when importing the intrinsic. if (info.compPublishStubParam) { assert(lvaStubArgumentVar == BAD_VAR_NUM); lvaStubArgumentVar = lvaGrabTempWithImplicitUse(false DEBUGARG("stub argument")); lvaGetDesc(lvaStubArgumentVar)->lvType = TYP_I_IMPL; // TODO-CQ: there is no need to mark it as doNotEnreg. There are no stores for this local // before codegen so liveness and LSRA mark it as "liveIn" and always allocate a stack slot for it. // However, it would be better to process it like other argument locals and keep it in // a reg for the whole method without spilling to the stack when possible. lvaSetVarDoNotEnregister(lvaStubArgumentVar DEBUGARG(DoNotEnregisterReason::VMNeedsStackAddr)); } }; DoPhase(this, PHASE_PRE_IMPORT, preImportPhase); compFunctionTraceStart(); // Incorporate profile data. // // Note: the importer is sensitive to block weights, so this has // to happen before importation. // DoPhase(this, PHASE_INCPROFILE, &Compiler::fgIncorporateProfileData); // If we're going to instrument code, we may need to prepare before // we import. // if (compileFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)) { DoPhase(this, PHASE_IBCPREP, &Compiler::fgPrepareToInstrumentMethod); } // Import: convert the instrs in each basic block to a tree based intermediate representation // DoPhase(this, PHASE_IMPORTATION, &Compiler::fgImport); // Expand any patchpoints // DoPhase(this, PHASE_PATCHPOINTS, &Compiler::fgTransformPatchpoints); // If instrumenting, add block and class probes. 
// if (compileFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)) { DoPhase(this, PHASE_IBCINSTR, &Compiler::fgInstrumentMethod); } // Transform indirect calls that require control flow expansion. // DoPhase(this, PHASE_INDXCALL, &Compiler::fgTransformIndirectCalls); // PostImportPhase: cleanup inlinees // auto postImportPhase = [this]() { // If this is a viable inline candidate if (compIsForInlining() && !compDonotInline()) { // Filter out unimported BBs in the inlinee // fgPostImportationCleanup(); // Update type of return spill temp if we have gathered // better info when importing the inlinee, and the return // spill temp is single def. if (fgNeedReturnSpillTemp()) { CORINFO_CLASS_HANDLE retExprClassHnd = impInlineInfo->retExprClassHnd; if (retExprClassHnd != nullptr) { LclVarDsc* returnSpillVarDsc = lvaGetDesc(lvaInlineeReturnSpillTemp); if (returnSpillVarDsc->lvSingleDef) { lvaUpdateClass(lvaInlineeReturnSpillTemp, retExprClassHnd, impInlineInfo->retExprClassHndIsExact); } } } } }; DoPhase(this, PHASE_POST_IMPORT, postImportPhase); // If we're importing for inlining, we're done. if (compIsForInlining()) { #ifdef FEATURE_JIT_METHOD_PERF if (pCompJitTimer != nullptr) { #if MEASURE_CLRAPI_CALLS EndPhase(PHASE_CLR_API); #endif pCompJitTimer->Terminate(this, CompTimeSummaryInfo::s_compTimeSummary, false); } #endif return; } // At this point in the phase list, all the inlinee phases have // been run, and inlinee compiles have exited, so we should only // get this far if we are jitting the root method. noway_assert(!compIsForInlining()); // Maybe the caller was not interested in generating code if (compIsForImportOnly()) { compFunctionTraceEnd(nullptr, 0, false); return; } #if !FEATURE_EH // If we aren't yet supporting EH in a compiler bring-up, remove as many EH handlers as possible, so // we can pass tests that contain try/catch EH, but don't actually throw any exceptions. fgRemoveEH(); #endif // !FEATURE_EH // We could allow ESP frames. Just need to reserve space for // pushing EBP if the method becomes an EBP-frame after an edit. // Note that requiring an EBP frame disallows double alignment. Thus if we change this // we either have to disallow double alignment for E&C some other way or handle it in EETwain. if (opts.compDbgEnC) { codeGen->setFramePointerRequired(true); // We don't care about localloc right now. If we do support it, // EECodeManager::FixContextForEnC() needs to handle it smartly // in case the localloc was actually executed. // // compLocallocUsed = true; } // Start phases that are broadly called morphing; this includes // global morph, as well as other phases that massage the trees so // that we can generate code out of them. // auto morphInitPhase = [this]() { // Initialize the BlockSet epoch NewBasicBlockEpoch(); fgOutgoingArgTemps = nullptr; // Insert call to class constructor as the first basic block if // we were asked to do so.
if (info.compCompHnd->initClass(nullptr /* field */, nullptr /* method */, impTokenLookupContextHandle /* context */) & CORINFO_INITCLASS_USE_HELPER) { fgEnsureFirstBBisScratch(); fgNewStmtAtBeg(fgFirstBB, fgInitThisClass()); } #ifdef DEBUG if (opts.compGcChecks) { for (unsigned i = 0; i < info.compArgsCount; i++) { if (lvaGetDesc(i)->TypeGet() == TYP_REF) { // confirm that the argument is a GC pointer (for debugging (GC stress)) GenTree* op = gtNewLclvNode(i, TYP_REF); GenTreeCall::Use* args = gtNewCallArgs(op); op = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_VOID, args); fgEnsureFirstBBisScratch(); fgNewStmtAtEnd(fgFirstBB, op); if (verbose) { printf("\ncompGcChecks tree:\n"); gtDispTree(op); } } } } #endif // DEBUG #if defined(DEBUG) && defined(TARGET_XARCH) if (opts.compStackCheckOnRet) { lvaReturnSpCheck = lvaGrabTempWithImplicitUse(false DEBUGARG("ReturnSpCheck")); lvaSetVarDoNotEnregister(lvaReturnSpCheck, DoNotEnregisterReason::ReturnSpCheck); lvaGetDesc(lvaReturnSpCheck)->lvType = TYP_I_IMPL; } #endif // defined(DEBUG) && defined(TARGET_XARCH) #if defined(DEBUG) && defined(TARGET_X86) if (opts.compStackCheckOnCall) { lvaCallSpCheck = lvaGrabTempWithImplicitUse(false DEBUGARG("CallSpCheck")); lvaGetDesc(lvaCallSpCheck)->lvType = TYP_I_IMPL; } #endif // defined(DEBUG) && defined(TARGET_X86) // Update flow graph after importation. // Removes un-imported blocks, trims EH, and ensures correct OSR entry flow. // fgPostImportationCleanup(); }; DoPhase(this, PHASE_MORPH_INIT, morphInitPhase); #ifdef DEBUG // Inliner could add basic blocks. Check that the flowgraph data is up-to-date fgDebugCheckBBlist(false, false); #endif // DEBUG // Inline callee methods into this root method // DoPhase(this, PHASE_MORPH_INLINE, &Compiler::fgInline); // Record "start" values for post-inlining cycles and elapsed time. RecordStateAtEndOfInlining(); // Transform each GT_ALLOCOBJ node into either an allocation helper call or // local variable allocation on the stack. 
ObjectAllocator objectAllocator(this); // PHASE_ALLOCATE_OBJECTS if (compObjectStackAllocation() && opts.OptimizationEnabled()) { objectAllocator.EnableObjectStackAllocation(); } objectAllocator.Run(); // Add any internal blocks/trees we may need // DoPhase(this, PHASE_MORPH_ADD_INTERNAL, &Compiler::fgAddInternal); // Remove empty try regions // DoPhase(this, PHASE_EMPTY_TRY, &Compiler::fgRemoveEmptyTry); // Remove empty finally regions // DoPhase(this, PHASE_EMPTY_FINALLY, &Compiler::fgRemoveEmptyFinally); // Streamline chains of finally invocations // DoPhase(this, PHASE_MERGE_FINALLY_CHAINS, &Compiler::fgMergeFinallyChains); // Clone code in finallys to reduce overhead for non-exceptional paths // DoPhase(this, PHASE_CLONE_FINALLY, &Compiler::fgCloneFinally); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Update finally target flags after EH optimizations // DoPhase(this, PHASE_UPDATE_FINALLY_FLAGS, &Compiler::fgUpdateFinallyTargetFlags); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #if DEBUG if (lvaEnregEHVars) { unsigned methHash = info.compMethodHash(); char* lostr = getenv("JitEHWTHashLo"); unsigned methHashLo = 0; bool dump = false; if (lostr != nullptr) { sscanf_s(lostr, "%x", &methHashLo); dump = true; } char* histr = getenv("JitEHWTHashHi"); unsigned methHashHi = UINT32_MAX; if (histr != nullptr) { sscanf_s(histr, "%x", &methHashHi); dump = true; } if (methHash < methHashLo || methHash > methHashHi) { lvaEnregEHVars = false; } else if (dump) { printf("Enregistering EH Vars for method %s, hash = 0x%x.\n", info.compFullName, info.compMethodHash()); printf(""); // flush } } if (lvaEnregMultiRegVars) { unsigned methHash = info.compMethodHash(); char* lostr = getenv("JitMultiRegHashLo"); unsigned methHashLo = 0; bool dump = false; if (lostr != nullptr) { sscanf_s(lostr, "%x", &methHashLo); dump = true; } char* histr = getenv("JitMultiRegHashHi"); unsigned methHashHi = UINT32_MAX; if (histr != nullptr) { sscanf_s(histr, "%x", &methHashHi); dump = true; } if (methHash < methHashLo || methHash > methHashHi) { lvaEnregMultiRegVars = false; } else if (dump) { printf("Enregistering MultiReg Vars for method %s, hash = 0x%x.\n", info.compFullName, info.compMethodHash()); printf(""); // flush } } #endif // Compute bbNum, bbRefs and bbPreds // // This is the first time full (not cheap) preds will be computed. // And, if we have profile data, we can now check integrity. // // From this point on the flowgraph information such as bbNum, // bbRefs or bbPreds has to be kept updated. // auto computePredsPhase = [this]() { JITDUMP("\nRenumbering the basic blocks for fgComputePred\n"); fgRenumberBlocks(); noway_assert(!fgComputePredsDone); fgComputePreds(); }; DoPhase(this, PHASE_COMPUTE_PREDS, computePredsPhase); // Now that we have pred lists, do some flow-related optimizations // if (opts.OptimizationEnabled()) { // Merge common throw blocks // DoPhase(this, PHASE_MERGE_THROWS, &Compiler::fgTailMergeThrows); // Run an early flow graph simplification pass // auto earlyUpdateFlowGraphPhase = [this]() { constexpr bool doTailDup = false; fgUpdateFlowGraph(doTailDup); }; DoPhase(this, PHASE_EARLY_UPDATE_FLOW_GRAPH, earlyUpdateFlowGraphPhase); } // Promote struct locals // auto promoteStructsPhase = [this]() { // For x64 and ARM64 we need to mark irregular parameters lvaRefCountState = RCS_EARLY; fgResetImplicitByRefRefCount(); fgPromoteStructs(); }; DoPhase(this, PHASE_PROMOTE_STRUCTS, promoteStructsPhase); // Figure out what locals are address-taken. 
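// (Address-exposed locals must live on the stack and are never enregistered, so identifying them here lets later phases treat the remaining locals more aggressively.)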
// DoPhase(this, PHASE_STR_ADRLCL, &Compiler::fgMarkAddressExposedLocals); // Run a simple forward substitution pass. // DoPhase(this, PHASE_FWD_SUB, &Compiler::fgForwardSub); // Apply the type update to implicit byref parameters; also choose (based on address-exposed // analysis) which implicit byref promotions to keep (requires copy to initialize) or discard. // DoPhase(this, PHASE_MORPH_IMPBYREF, &Compiler::fgRetypeImplicitByRefArgs); #ifdef DEBUG // Now that locals have address-taken and implicit byref marked, we can safely apply stress. lvaStressLclFld(); fgStress64RsltMul(); #endif // DEBUG // Morph the trees in all the blocks of the method // auto morphGlobalPhase = [this]() { unsigned prevBBCount = fgBBcount; fgMorphBlocks(); // Fix any LclVar annotations on discarded struct promotion temps for implicit by-ref args fgMarkDemotedImplicitByRefArgs(); lvaRefCountState = RCS_INVALID; #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) if (fgNeedToAddFinallyTargetBits) { // We previously wiped out the BBF_FINALLY_TARGET bits due to some morphing; add them back. fgAddFinallyTargetFlags(); fgNeedToAddFinallyTargetBits = false; } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Decide the kind of code we want to generate fgSetOptions(); fgExpandQmarkNodes(); #ifdef DEBUG compCurBB = nullptr; #endif // DEBUG // If we needed to create any new BasicBlocks then renumber the blocks if (fgBBcount > prevBBCount) { fgRenumberBlocks(); } // We can now enable all phase checking activePhaseChecks = PhaseChecks::CHECK_ALL; }; DoPhase(this, PHASE_MORPH_GLOBAL, morphGlobalPhase); // GS security checks for unsafe buffers // auto gsPhase = [this]() { unsigned prevBBCount = fgBBcount; if (getNeedsGSSecurityCookie()) { gsGSChecksInitCookie(); if (compGSReorderStackLayout) { gsCopyShadowParams(); } // If we needed to create any new BasicBlocks then renumber the blocks if (fgBBcount > prevBBCount) { fgRenumberBlocks(); } } else { JITDUMP("No GS security needed\n"); } }; DoPhase(this, PHASE_GS_COOKIE, gsPhase); // Compute the block and edge weights // DoPhase(this, PHASE_COMPUTE_EDGE_WEIGHTS, &Compiler::fgComputeBlockAndEdgeWeights); #if defined(FEATURE_EH_FUNCLETS) // Create funclets from the EH handlers. // DoPhase(this, PHASE_CREATE_FUNCLETS, &Compiler::fgCreateFunclets); #endif // FEATURE_EH_FUNCLETS if (opts.OptimizationEnabled()) { // Invert loops // DoPhase(this, PHASE_INVERT_LOOPS, &Compiler::optInvertLoops); // Optimize block order // DoPhase(this, PHASE_OPTIMIZE_LAYOUT, &Compiler::optOptimizeLayout); // Compute reachability sets and dominators. // DoPhase(this, PHASE_COMPUTE_REACHABILITY, &Compiler::fgComputeReachability); // Scale block weights and mark run rarely blocks. // DoPhase(this, PHASE_SET_BLOCK_WEIGHTS, &Compiler::optSetBlockWeights); // Discover and classify natural loops (e.g. mark iterative loops as such). Also marks loop blocks // and sets bbWeight to the loop nesting levels. // DoPhase(this, PHASE_FIND_LOOPS, &Compiler::optFindLoopsPhase); // Clone loops with optimization opportunities, and choose one based on dynamic condition evaluation. // DoPhase(this, PHASE_CLONE_LOOPS, &Compiler::optCloneLoops); // Unroll loops // DoPhase(this, PHASE_UNROLL_LOOPS, &Compiler::optUnrollLoops); // Clear loop table info that is not used after this point, and might become invalid. 
// DoPhase(this, PHASE_CLEAR_LOOP_INFO, &Compiler::optClearLoopIterInfo); } #ifdef DEBUG fgDebugCheckLinks(); #endif // Create the variable table (and compute variable ref counts) // DoPhase(this, PHASE_MARK_LOCAL_VARS, &Compiler::lvaMarkLocalVars); // IMPORTANT, after this point, locals are ref counted. // However, ref counts are not kept incrementally up to date. assert(lvaLocalVarRefCounted()); if (opts.OptimizationEnabled()) { // Optimize boolean conditions // DoPhase(this, PHASE_OPTIMIZE_BOOLS, &Compiler::optOptimizeBools); // optOptimizeBools() might have changed the number of blocks; the dominators/reachability might be bad. } // Figure out the order in which operators are to be evaluated // DoPhase(this, PHASE_FIND_OPER_ORDER, &Compiler::fgFindOperOrder); // Weave the tree lists. Anyone who modifies the tree shapes after // this point is responsible for calling fgSetStmtSeq() to keep the // nodes properly linked. // This can create GC poll calls, and create new BasicBlocks (without updating dominators/reachability). // DoPhase(this, PHASE_SET_BLOCK_ORDER, &Compiler::fgSetBlockOrder); // At this point we know if we are fully interruptible or not if (opts.OptimizationEnabled()) { bool doSsa = true; bool doEarlyProp = true; bool doValueNum = true; bool doLoopHoisting = true; bool doCopyProp = true; bool doBranchOpt = true; bool doAssertionProp = true; bool doRangeAnalysis = true; int iterations = 1; #if defined(OPT_CONFIG) doSsa = (JitConfig.JitDoSsa() != 0); doEarlyProp = doSsa && (JitConfig.JitDoEarlyProp() != 0); doValueNum = doSsa && (JitConfig.JitDoValueNumber() != 0); doLoopHoisting = doValueNum && (JitConfig.JitDoLoopHoisting() != 0); doCopyProp = doValueNum && (JitConfig.JitDoCopyProp() != 0); doBranchOpt = doValueNum && (JitConfig.JitDoRedundantBranchOpts() != 0); doAssertionProp = doValueNum && (JitConfig.JitDoAssertionProp() != 0); doRangeAnalysis = doAssertionProp && (JitConfig.JitDoRangeAnalysis() != 0); if (opts.optRepeat) { iterations = JitConfig.JitOptRepeatCount(); } #endif // defined(OPT_CONFIG) while (iterations > 0) { if (doSsa) { // Build up SSA form for the IR // DoPhase(this, PHASE_BUILD_SSA, &Compiler::fgSsaBuild); } if (doEarlyProp) { // Propagate array length and rewrite getType() method call // DoPhase(this, PHASE_EARLY_PROP, &Compiler::optEarlyProp); } if (doValueNum) { // Value number the trees // DoPhase(this, PHASE_VALUE_NUMBER, &Compiler::fgValueNumber); } if (doLoopHoisting) { // Hoist invariant code out of loops // DoPhase(this, PHASE_HOIST_LOOP_CODE, &Compiler::optHoistLoopCode); } if (doCopyProp) { // Perform VN based copy propagation // DoPhase(this, PHASE_VN_COPY_PROP, &Compiler::optVnCopyProp); } if (doBranchOpt) { DoPhase(this, PHASE_OPTIMIZE_BRANCHES, &Compiler::optRedundantBranches); } // Remove common sub-expressions // DoPhase(this, PHASE_OPTIMIZE_VALNUM_CSES, &Compiler::optOptimizeCSEs); if (doAssertionProp) { // Assertion propagation // DoPhase(this, PHASE_ASSERTION_PROP_MAIN, &Compiler::optAssertionPropMain); } if (doRangeAnalysis) { auto rangePhase = [this]() { RangeCheck rc(this); rc.OptimizeRangeChecks(); }; // Bounds check elimination via range analysis // DoPhase(this, PHASE_OPTIMIZE_INDEX_CHECKS, rangePhase); } if (fgModified) { // update the flowgraph if we modified it during the optimization phase // auto optUpdateFlowGraphPhase = [this]() { constexpr bool doTailDup = false; fgUpdateFlowGraph(doTailDup); }; DoPhase(this, PHASE_OPT_UPDATE_FLOW_GRAPH, optUpdateFlowGraphPhase); // Recompute the edge weight if we have modified the 
flow graph // DoPhase(this, PHASE_COMPUTE_EDGE_WEIGHTS2, &Compiler::fgComputeEdgeWeights); } // Iterate if requested, resetting annotations first. if (--iterations == 0) { break; } ResetOptAnnotations(); RecomputeLoopInfo(); } } // Insert GC Polls DoPhase(this, PHASE_INSERT_GC_POLLS, &Compiler::fgInsertGCPolls); // Determine start of cold region if we are hot/cold splitting // DoPhase(this, PHASE_DETERMINE_FIRST_COLD_BLOCK, &Compiler::fgDetermineFirstColdBlock); #ifdef DEBUG fgDebugCheckLinks(compStressCompile(STRESS_REMORPH_TREES, 50)); // Stash the current estimate of the function's size if necessary. if (verbose) { compSizeEstimate = 0; compCycleEstimate = 0; for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->Statements()) { compSizeEstimate += stmt->GetCostSz(); compCycleEstimate += stmt->GetCostEx(); } } } #endif // rationalize trees Rationalizer rat(this); // PHASE_RATIONALIZE rat.Run(); // Here we do "simple lowering". When the RyuJIT backend works for all // platforms, this will be part of the more general lowering phase. For now, though, we do a separate // pass of "final lowering." We must do this before (final) liveness analysis, because this creates // range check throw blocks, in which the liveness must be correct. // DoPhase(this, PHASE_SIMPLE_LOWERING, &Compiler::fgSimpleLowering); // Enable this to gather statistical data such as // call and register argument info, flowgraph and loop info, etc. compJitStats(); #ifdef TARGET_ARM if (compLocallocUsed) { // We reserve REG_SAVED_LOCALLOC_SP to store SP on entry for stack unwinding codeGen->regSet.rsMaskResvd |= RBM_SAVED_LOCALLOC_SP; } #endif // TARGET_ARM // Assign registers to variables, etc. /////////////////////////////////////////////////////////////////////////////// // Dominator and reachability sets are no longer valid. They haven't been // maintained up to here, and shouldn't be used (unless recomputed). /////////////////////////////////////////////////////////////////////////////// fgDomsComputed = false; // Create LinearScan before Lowering, so that Lowering can call LinearScan methods // for determining whether locals are register candidates and (for xarch) whether // a node is a containable memory op. m_pLinearScan = getLinearScanAllocator(this); // Lower // m_pLowering = new (this, CMK_LSRA) Lowering(this, m_pLinearScan); // PHASE_LOWERING m_pLowering->Run(); if (!compMacOsArm64Abi()) { // Set stack levels; this information is necessary for x86 // but on other platforms it is used only in asserts. // TODO: do not run it in release on other platforms, see https://github.com/dotnet/runtime/issues/42673. StackLevelSetter stackLevelSetter(this); stackLevelSetter.Run(); } // We can not add any new tracked variables after this point. 
lvaTrackedFixed = true; // Now that lowering is completed we can proceed to perform register allocation // auto linearScanPhase = [this]() { m_pLinearScan->doLinearScan(); }; DoPhase(this, PHASE_LINEAR_SCAN, linearScanPhase); // Copied from rpPredictRegUse() SetFullPtrRegMapRequired(codeGen->GetInterruptible() || !codeGen->isFramePointerUsed()); #if FEATURE_LOOP_ALIGN // Place loop alignment instructions DoPhase(this, PHASE_ALIGN_LOOPS, &Compiler::placeLoopAlignInstructions); #endif // Generate code codeGen->genGenerateCode(methodCodePtr, methodCodeSize); #if TRACK_LSRA_STATS if (JitConfig.DisplayLsraStats() == 2) { m_pLinearScan->dumpLsraStatsCsv(jitstdout); } #endif // TRACK_LSRA_STATS // We're done -- set the active phase to the last phase // (which isn't really a phase) mostRecentlyActivePhase = PHASE_POST_EMIT; #ifdef FEATURE_JIT_METHOD_PERF if (pCompJitTimer) { #if MEASURE_CLRAPI_CALLS EndPhase(PHASE_CLR_API); #else EndPhase(PHASE_POST_EMIT); #endif pCompJitTimer->Terminate(this, CompTimeSummaryInfo::s_compTimeSummary, true); } #endif // Generate PatchpointInfo generatePatchpointInfo(); RecordStateAtEndOfCompilation(); #ifdef FEATURE_TRACELOGGING compJitTelemetry.NotifyEndOfCompilation(); #endif #if defined(DEBUG) ++Compiler::jitTotalMethodCompiled; #endif // defined(DEBUG) compFunctionTraceEnd(*methodCodePtr, *methodCodeSize, false); JITDUMP("Method code size: %d\n", (unsigned)(*methodCodeSize)); #if FUNC_INFO_LOGGING if (compJitFuncInfoFile != nullptr) { assert(!compIsForInlining()); #ifdef DEBUG // We only have access to info.compFullName in DEBUG builds. fprintf(compJitFuncInfoFile, "%s\n", info.compFullName); #elif FEATURE_SIMD fprintf(compJitFuncInfoFile, " %s\n", eeGetMethodFullName(info.compMethodHnd)); #endif fprintf(compJitFuncInfoFile, ""); // in our logic this causes a flush } #endif // FUNC_INFO_LOGGING } #if FEATURE_LOOP_ALIGN //------------------------------------------------------------------------ // placeLoopAlignInstructions: Iterate over all the blocks and determine // the best position to place the 'align' instruction. Inserting 'align' // instructions after an unconditional branch is preferred over inserting // in the block before the loop. In case there are multiple blocks // having 'jmp', the one that has lower weight is preferred. // If the block having 'jmp' is hotter than the block before the loop, // the align will still be placed after 'jmp' because the processor should // be smart enough to not fetch extra instruction beyond jmp. // void Compiler::placeLoopAlignInstructions() { if (loopAlignCandidates == 0) { return; } int loopsToProcess = loopAlignCandidates; JITDUMP("Inside placeLoopAlignInstructions for %d loops.\n", loopAlignCandidates); // Add align only if there were any loops that needed alignment weight_t minBlockSoFar = BB_MAX_WEIGHT; BasicBlock* bbHavingAlign = nullptr; BasicBlock::loopNumber currentAlignedLoopNum = BasicBlock::NOT_IN_LOOP; if ((fgFirstBB != nullptr) && fgFirstBB->isLoopAlign()) { // Adding align instruction in prolog is not supported // hence just remove that loop from our list. loopsToProcess--; } for (BasicBlock* const block : Blocks()) { if (currentAlignedLoopNum != BasicBlock::NOT_IN_LOOP) { // We've been processing blocks within an aligned loop. Are we out of that loop now? 
if (currentAlignedLoopNum != block->bbNatLoopNum) { currentAlignedLoopNum = BasicBlock::NOT_IN_LOOP; } } // If there is an unconditional jump (which is not part of a callf/always pair) if (opts.compJitHideAlignBehindJmp && (block->bbJumpKind == BBJ_ALWAYS) && !block->isBBCallAlwaysPairTail()) { // Track the lower weight blocks if (block->bbWeight < minBlockSoFar) { if (currentAlignedLoopNum == BasicBlock::NOT_IN_LOOP) { // Ok to insert align instruction in this block because it is not part of any aligned loop. minBlockSoFar = block->bbWeight; bbHavingAlign = block; JITDUMP(FMT_BB ", bbWeight=" FMT_WT " ends with unconditional 'jmp' \n", block->bbNum, block->bbWeight); } } } if ((block->bbNext != nullptr) && (block->bbNext->isLoopAlign())) { // If jmp was not found, then the block before the loop start is where the align instruction will be added. if (bbHavingAlign == nullptr) { bbHavingAlign = block; JITDUMP("Marking " FMT_BB " before the loop with BBF_HAS_ALIGN for loop at " FMT_BB "\n", block->bbNum, block->bbNext->bbNum); } else { JITDUMP("Marking " FMT_BB " that ends with unconditional jump with BBF_HAS_ALIGN for loop at " FMT_BB "\n", bbHavingAlign->bbNum, block->bbNext->bbNum); } bbHavingAlign->bbFlags |= BBF_HAS_ALIGN; minBlockSoFar = BB_MAX_WEIGHT; bbHavingAlign = nullptr; currentAlignedLoopNum = block->bbNext->bbNatLoopNum; if (--loopsToProcess == 0) { break; } } } assert(loopsToProcess == 0); } #endif //------------------------------------------------------------------------ // generatePatchpointInfo: allocate and fill in patchpoint info data, // and report it to the VM // void Compiler::generatePatchpointInfo() { if (!doesMethodHavePatchpoints() && !doesMethodHavePartialCompilationPatchpoints()) { // Nothing to report return; } // Patchpoints are only found in Tier0 code, which is unoptimized, and so // should always have a frame pointer. assert(codeGen->isFramePointerUsed()); // Allocate patchpoint info storage from runtime, and fill in initial bits of data. const unsigned patchpointInfoSize = PatchpointInfo::ComputeSize(info.compLocalsCount); PatchpointInfo* const patchpointInfo = (PatchpointInfo*)info.compCompHnd->allocateArray(patchpointInfoSize); // Patchpoint offsets always refer to "virtual frame offsets". // // For x64 this falls out because Tier0 frames are always FP frames, and so the FP-relative // offset is what we want. // // For arm64, if the frame pointer is not at the top of the frame, we need to adjust the // offset. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_AMD64) // We add +TARGET_POINTER_SIZE here to account for the slot that Jit_Patchpoint // creates when it simulates calling the OSR method (the "pseudo return address" slot). // This is effectively a new slot at the bottom of the Tier0 frame. // const int totalFrameSize = codeGen->genTotalFrameSize() + TARGET_POINTER_SIZE; const int offsetAdjust = 0; #elif defined(TARGET_ARM64) // SP is not manipulated by calls so no frame size adjustment needed. // Local Offsets may need adjusting, if FP is at bottom of frame. // const int totalFrameSize = codeGen->genTotalFrameSize(); const int offsetAdjust = codeGen->genSPtoFPdelta() - totalFrameSize; #else NYI("patchpoint info generation"); const int offsetAdjust = 0; const int totalFrameSize = 0; #endif patchpointInfo->Initialize(info.compLocalsCount, totalFrameSize); JITDUMP("--OSR--- Total Frame Size %d, local offset adjust is %d\n", patchpointInfo->TotalFrameSize(), offsetAdjust); // We record offsets for all the "locals" here.
Could restrict // this to just the IL locals with some extra logic, and save a bit of space, // but would need to adjust all consumers, too. for (unsigned lclNum = 0; lclNum < info.compLocalsCount; lclNum++) { LclVarDsc* const varDsc = lvaGetDesc(lclNum); // We expect all these to have stack homes, and be FP relative assert(varDsc->lvOnFrame); assert(varDsc->lvFramePointerBased); // Record FramePtr relative offset (no localloc yet) patchpointInfo->SetOffset(lclNum, varDsc->GetStackOffset() + offsetAdjust); // Note if IL stream contained an address-of that potentially leads to exposure. // This bit of IL may be skipped by OSR partial importation. if (varDsc->lvHasLdAddrOp) { patchpointInfo->SetIsExposed(lclNum); } JITDUMP("--OSR-- V%02u is at virtual offset %d%s\n", lclNum, patchpointInfo->Offset(lclNum), patchpointInfo->IsExposed(lclNum) ? " (exposed)" : ""); } // Special offsets // if (lvaReportParamTypeArg()) { const int offset = lvaCachedGenericContextArgOffset(); patchpointInfo->SetGenericContextArgOffset(offset + offsetAdjust); JITDUMP("--OSR-- cached generic context virtual offset is %d\n", patchpointInfo->GenericContextArgOffset()); } if (lvaKeepAliveAndReportThis()) { const int offset = lvaCachedGenericContextArgOffset(); patchpointInfo->SetKeptAliveThisOffset(offset + offsetAdjust); JITDUMP("--OSR-- kept-alive this virtual offset is %d\n", patchpointInfo->KeptAliveThisOffset()); } if (compGSReorderStackLayout) { assert(lvaGSSecurityCookie != BAD_VAR_NUM); LclVarDsc* const varDsc = lvaGetDesc(lvaGSSecurityCookie); patchpointInfo->SetSecurityCookieOffset(varDsc->GetStackOffset() + offsetAdjust); JITDUMP("--OSR-- security cookie V%02u virtual offset is %d\n", lvaGSSecurityCookie, patchpointInfo->SecurityCookieOffset()); } if (lvaMonAcquired != BAD_VAR_NUM) { LclVarDsc* const varDsc = lvaGetDesc(lvaMonAcquired); patchpointInfo->SetMonitorAcquiredOffset(varDsc->GetStackOffset() + offsetAdjust); JITDUMP("--OSR-- monitor acquired V%02u virtual offset is %d\n", lvaMonAcquired, patchpointInfo->MonitorAcquiredOffset()); } #if defined(TARGET_AMD64) // Record callee save registers. // Currently only needed for x64. // regMaskTP rsPushRegs = codeGen->regSet.rsGetModifiedRegsMask() & RBM_CALLEE_SAVED; rsPushRegs |= RBM_FPBASE; patchpointInfo->SetCalleeSaveRegisters((uint64_t)rsPushRegs); JITDUMP("--OSR-- Tier0 callee saves: "); JITDUMPEXEC(dspRegMask((regMaskTP)patchpointInfo->CalleeSaveRegisters())); JITDUMP("\n"); #endif // Register this with the runtime. info.compCompHnd->setPatchpointInfo(patchpointInfo); } //------------------------------------------------------------------------ // ResetOptAnnotations: Clear annotations produced during global optimizations. // // Notes: // The intent of this method is to clear any information typically assumed // to be set only once; it is used between iterations when JitOptRepeat is // in effect. void Compiler::ResetOptAnnotations() { assert(opts.optRepeat); assert(JitConfig.JitOptRepeatCount() > 0); fgResetForSsa(); vnStore = nullptr; m_opAsgnVarDefSsaNums = nullptr; m_blockToEHPreds = nullptr; fgSsaPassesCompleted = 0; fgVNPassesCompleted = 0; for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->Statements()) { for (GenTree* const tree : stmt->TreeList()) { tree->ClearVN(); tree->ClearAssertion(); tree->gtCSEnum = NO_CSE; } } } } //------------------------------------------------------------------------ // RecomputeLoopInfo: Recompute loop annotations between opt-repeat iterations. 
// // Notes: // The intent of this method is to update loop structure annotations, and those // they depend on; these annotations may have become stale during optimization, // and need to be up-to-date before running another iteration of optimizations. // void Compiler::RecomputeLoopInfo() { assert(opts.optRepeat); assert(JitConfig.JitOptRepeatCount() > 0); // Recompute reachability sets, dominators, and loops. optResetLoopInfo(); fgDomsComputed = false; fgComputeReachability(); optSetBlockWeights(); // Rebuild the loop tree annotations themselves optFindLoops(); } /*****************************************************************************/ void Compiler::ProcessShutdownWork(ICorStaticInfo* statInfo) { } /*****************************************************************************/ #ifdef DEBUG void* forceFrameJIT; // used to force to frame &useful for fastchecked debugging bool Compiler::skipMethod() { static ConfigMethodRange fJitRange; fJitRange.EnsureInit(JitConfig.JitRange()); assert(!fJitRange.Error()); // Normally JitConfig.JitRange() is null, we don't want to skip // jitting any methods. // // So, the logic below relies on the fact that a null range string // passed to ConfigMethodRange represents the set of all methods. if (!fJitRange.Contains(info.compMethodHash())) { return true; } if (JitConfig.JitExclude().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { return true; } if (!JitConfig.JitInclude().isEmpty() && !JitConfig.JitInclude().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { return true; } return false; } #endif /*****************************************************************************/ int Compiler::compCompile(CORINFO_MODULE_HANDLE classPtr, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags) { // compInit should have set these already. noway_assert(info.compMethodInfo != nullptr); noway_assert(info.compCompHnd != nullptr); noway_assert(info.compMethodHnd != nullptr); #ifdef FEATURE_JIT_METHOD_PERF static bool checkedForJitTimeLog = false; pCompJitTimer = nullptr; if (!checkedForJitTimeLog) { // Call into VM to get the config strings. FEATURE_JIT_METHOD_PERF is enabled for // retail builds. Do not call the regular Config helper here as it would pull // in a copy of the config parser into the clrjit.dll. InterlockedCompareExchangeT(&Compiler::compJitTimeLogFilename, (LPCWSTR)info.compCompHnd->getJitTimeLogFilename(), NULL); // At a process or module boundary clear the file and start afresh. JitTimer::PrintCsvHeader(); checkedForJitTimeLog = true; } if ((Compiler::compJitTimeLogFilename != nullptr) || (JitTimeLogCsv() != nullptr)) { pCompJitTimer = JitTimer::Create(this, info.compMethodInfo->ILCodeSize); } #endif // FEATURE_JIT_METHOD_PERF #ifdef DEBUG Compiler* me = this; forceFrameJIT = (void*)&me; // let us see the this pointer in fastchecked build // set this early so we can use it without relying on random memory values verbose = compIsForInlining() ? 
impInlineInfo->InlinerCompiler->verbose : false; #endif #if FUNC_INFO_LOGGING LPCWSTR tmpJitFuncInfoFilename = JitConfig.JitFuncInfoFile(); if (tmpJitFuncInfoFilename != nullptr) { LPCWSTR oldFuncInfoFileName = InterlockedCompareExchangeT(&compJitFuncInfoFilename, tmpJitFuncInfoFilename, NULL); if (oldFuncInfoFileName == nullptr) { assert(compJitFuncInfoFile == nullptr); compJitFuncInfoFile = _wfopen(compJitFuncInfoFilename, W("a")); if (compJitFuncInfoFile == nullptr) { #if defined(DEBUG) && !defined(HOST_UNIX) // no 'perror' in the PAL perror("Failed to open JitFuncInfoLogFile"); #endif // defined(DEBUG) && !defined(HOST_UNIX) } } } #endif // FUNC_INFO_LOGGING // if (s_compMethodsCount==0) setvbuf(jitstdout, NULL, _IONBF, 0); if (compIsForInlining()) { compileFlags->Clear(JitFlags::JIT_FLAG_OSR); info.compILEntry = 0; info.compPatchpointInfo = nullptr; } else if (compileFlags->IsSet(JitFlags::JIT_FLAG_OSR)) { // Fetch OSR info from the runtime info.compPatchpointInfo = info.compCompHnd->getOSRInfo(&info.compILEntry); assert(info.compPatchpointInfo != nullptr); } #if defined(TARGET_ARM64) compFrameInfo = {0}; #endif virtualStubParamInfo = new (this, CMK_Unknown) VirtualStubParamInfo(IsTargetAbi(CORINFO_CORERT_ABI)); // compMatchedVM is set to true if both CPU/ABI and OS are matching the execution engine requirements // // Do we have a matched VM? Or are we "abusing" the VM to help us do JIT work (such as using an x86 native VM // with an ARM-targeting "altjit"). // Match CPU/ABI for compMatchedVM info.compMatchedVM = IMAGE_FILE_MACHINE_TARGET == info.compCompHnd->getExpectedTargetArchitecture(); // Match OS for compMatchedVM CORINFO_EE_INFO* eeInfo = eeGetEEInfo(); #ifdef TARGET_OS_RUNTIMEDETERMINED noway_assert(TargetOS::OSSettingConfigured); #endif if (TargetOS::IsMacOS) { info.compMatchedVM = info.compMatchedVM && (eeInfo->osType == CORINFO_MACOS); } else if (TargetOS::IsUnix) { if (TargetArchitecture::IsX64) { // MacOS x64 uses the Unix jit variant in crossgen2, not a special jit info.compMatchedVM = info.compMatchedVM && ((eeInfo->osType == CORINFO_UNIX) || (eeInfo->osType == CORINFO_MACOS)); } else { info.compMatchedVM = info.compMatchedVM && (eeInfo->osType == CORINFO_UNIX); } } else if (TargetOS::IsWindows) { info.compMatchedVM = info.compMatchedVM && (eeInfo->osType == CORINFO_WINNT); } // If we are not compiling for a matched VM, then we are getting JIT flags that don't match our target // architecture. The two main examples here are an ARM targeting altjit hosted on x86 and an ARM64 // targeting altjit hosted on x64. (Though with cross-bitness work, the host doesn't necessarily need // to be of the same bitness.) In these cases, we need to fix up the JIT flags to be appropriate for // the target, as the VM's expected target may overlap bit flags with different meaning to our target. // Note that it might be better to do this immediately when setting the JIT flags in CILJit::compileMethod() // (when JitFlags::SetFromFlags() is called), but this is close enough. (To move this logic to // CILJit::compileMethod() would require moving the info.compMatchedVM computation there as well.) if (!info.compMatchedVM) { #if defined(TARGET_ARM) // Currently nothing needs to be done. There are no ARM flags that conflict with other flags. #endif // defined(TARGET_ARM) #if defined(TARGET_ARM64) // The x86/x64 architecture capabilities flags overlap with the ARM64 ones. Set a reasonable architecture // target default. 
Currently this is disabling all ARM64 architecture features except FP and SIMD, but this // should be altered to possibly enable all of them, when they are known to all work. CORINFO_InstructionSetFlags defaultArm64Flags; defaultArm64Flags.AddInstructionSet(InstructionSet_ArmBase); defaultArm64Flags.AddInstructionSet(InstructionSet_AdvSimd); defaultArm64Flags.Set64BitInstructionSetVariants(); compileFlags->SetInstructionSetFlags(defaultArm64Flags); #endif // defined(TARGET_ARM64) } compMaxUncheckedOffsetForNullObject = eeGetEEInfo()->maxUncheckedOffsetForNullObject; // Set the context for token lookup. if (compIsForInlining()) { impTokenLookupContextHandle = impInlineInfo->tokenLookupContextHandle; assert(impInlineInfo->inlineCandidateInfo->clsHandle == info.compCompHnd->getMethodClass(info.compMethodHnd)); info.compClassHnd = impInlineInfo->inlineCandidateInfo->clsHandle; assert(impInlineInfo->inlineCandidateInfo->clsAttr == info.compCompHnd->getClassAttribs(info.compClassHnd)); // printf("%x != %x\n", impInlineInfo->inlineCandidateInfo->clsAttr, // info.compCompHnd->getClassAttribs(info.compClassHnd)); info.compClassAttr = impInlineInfo->inlineCandidateInfo->clsAttr; } else { impTokenLookupContextHandle = METHOD_BEING_COMPILED_CONTEXT(); info.compClassHnd = info.compCompHnd->getMethodClass(info.compMethodHnd); info.compClassAttr = info.compCompHnd->getClassAttribs(info.compClassHnd); } #ifdef DEBUG if (JitConfig.EnableExtraSuperPmiQueries()) { // This call to getClassModule/getModuleAssembly/getAssemblyName fails in crossgen2 due to these // APIs being unimplemented. So disable this extra info for pre-jit mode. See // https://github.com/dotnet/runtime/issues/48888. // // Ditto for some of the class name queries for generic params. // if (!compileFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { // Get the assembly name, to aid finding any particular SuperPMI method context function (void)info.compCompHnd->getAssemblyName( info.compCompHnd->getModuleAssembly(info.compCompHnd->getClassModule(info.compClassHnd))); // Fetch class names for the method's generic parameters. // CORINFO_SIG_INFO sig; info.compCompHnd->getMethodSig(info.compMethodHnd, &sig, nullptr); const unsigned classInst = sig.sigInst.classInstCount; if (classInst > 0) { for (unsigned i = 0; i < classInst; i++) { eeGetClassName(sig.sigInst.classInst[i]); } } const unsigned methodInst = sig.sigInst.methInstCount; if (methodInst > 0) { for (unsigned i = 0; i < methodInst; i++) { eeGetClassName(sig.sigInst.methInst[i]); } } } } #endif // DEBUG info.compProfilerCallback = false; // Assume false until we are told to hook this method. #ifdef DEBUG if (!compIsForInlining()) { JitTls::GetLogEnv()->setCompiler(this); } // Have we been told to be more selective in our Jitting? 
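// Note: skipMethod() (defined earlier in this file) consults the DEBUG-only config knobs
// JitRange (a range of method hashes; a null/empty range means "all methods"), JitExclude
// and JitInclude (method-name sets). Illustrative example only -- the exact environment
// variable spelling is host-dependent (typically COMPlus_/DOTNET_-prefixed):
//   JitRange=<low>-<high>   -- only jit methods whose hash falls in that range
//   JitExclude=<methodName> -- skip methods with that name
// A skipped root method returns CORJIT_SKIPPED; a skipped inlinee is recorded as
// CALLEE_MARKED_AS_SKIPPED, as the code below shows.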
if (skipMethod()) { if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLEE_MARKED_AS_SKIPPED); } return CORJIT_SKIPPED; } #endif // DEBUG // Set this before the first 'BADCODE' // Skip verification where possible assert(compileFlags->IsSet(JitFlags::JIT_FLAG_SKIP_VERIFICATION)); /* Setup an error trap */ struct Param { Compiler* pThis; CORINFO_MODULE_HANDLE classPtr; COMP_HANDLE compHnd; CORINFO_METHOD_INFO* methodInfo; void** methodCodePtr; uint32_t* methodCodeSize; JitFlags* compileFlags; int result; } param; param.pThis = this; param.classPtr = classPtr; param.compHnd = info.compCompHnd; param.methodInfo = info.compMethodInfo; param.methodCodePtr = methodCodePtr; param.methodCodeSize = methodCodeSize; param.compileFlags = compileFlags; param.result = CORJIT_INTERNALERROR; setErrorTrap(info.compCompHnd, Param*, pParam, &param) // ERROR TRAP: Start normal block { pParam->result = pParam->pThis->compCompileHelper(pParam->classPtr, pParam->compHnd, pParam->methodInfo, pParam->methodCodePtr, pParam->methodCodeSize, pParam->compileFlags); } finallyErrorTrap() // ERROR TRAP: The following block handles errors { /* Cleanup */ if (compIsForInlining()) { goto DoneCleanUp; } /* Tell the emitter that we're done with this function */ GetEmitter()->emitEndCG(); DoneCleanUp: compDone(); } endErrorTrap() // ERROR TRAP: End return param.result; } #if defined(DEBUG) || defined(INLINE_DATA) //------------------------------------------------------------------------ // compMethodHash: get hash code for currently jitted method // // Returns: // Hash based on method's full name // unsigned Compiler::Info::compMethodHash() const { if (compMethodHashPrivate == 0) { // compMethodHashPrivate = compCompHnd->getMethodHash(compMethodHnd); assert(compFullName != nullptr); assert(*compFullName != 0); COUNT_T hash = HashStringA(compFullName); // Use compFullName to generate the hash, as it contains the signature // and return type compMethodHashPrivate = hash; } return compMethodHashPrivate; } //------------------------------------------------------------------------ // compMethodHash: get hash code for specified method // // Arguments: // methodHnd - method of interest // // Returns: // Hash based on method's full name // unsigned Compiler::compMethodHash(CORINFO_METHOD_HANDLE methodHnd) { // If this is the root method, delegate to the caching version // if (methodHnd == info.compMethodHnd) { return info.compMethodHash(); } // Else compute from scratch. Might consider caching this too. 
// unsigned methodHash = 0; const char* calleeName = eeGetMethodFullName(methodHnd); if (calleeName != nullptr) { methodHash = HashStringA(calleeName); } else { methodHash = info.compCompHnd->getMethodHash(methodHnd); } return methodHash; } #endif // defined(DEBUG) || defined(INLINE_DATA) void Compiler::compCompileFinish() { #if defined(DEBUG) || MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || DISPLAY_SIZES || CALL_ARG_STATS genMethodCnt++; #endif #if MEASURE_MEM_ALLOC { compArenaAllocator->finishMemStats(); memAllocHist.record((unsigned)((compArenaAllocator->getTotalBytesAllocated() + 1023) / 1024)); memUsedHist.record((unsigned)((compArenaAllocator->getTotalBytesUsed() + 1023) / 1024)); } #ifdef DEBUG if (s_dspMemStats || verbose) { printf("\nAllocations for %s (MethodHash=%08x)\n", info.compFullName, info.compMethodHash()); compArenaAllocator->dumpMemStats(jitstdout); } #endif // DEBUG #endif // MEASURE_MEM_ALLOC #if LOOP_HOIST_STATS AddLoopHoistStats(); #endif // LOOP_HOIST_STATS #if MEASURE_NODE_SIZE genTreeNcntHist.record(static_cast<unsigned>(genNodeSizeStatsPerFunc.genTreeNodeCnt)); genTreeNsizHist.record(static_cast<unsigned>(genNodeSizeStatsPerFunc.genTreeNodeSize)); #endif #if defined(DEBUG) // Small methods should fit in ArenaAllocator::getDefaultPageSize(), or else // we should bump up ArenaAllocator::getDefaultPageSize() if ((info.compILCodeSize <= 32) && // Is it a reasonably small method? (info.compNativeCodeSize < 512) && // Some trivial methods generate huge native code. eg. pushing a single huge // struct (impInlinedCodeSize <= 128) && // Is the the inlining reasonably bounded? // Small methods cannot meaningfully have a big number of locals // or arguments. We always track arguments at the start of // the prolog which requires memory (info.compLocalsCount <= 32) && (!opts.MinOpts()) && // We may have too many local variables, etc (getJitStressLevel() == 0) && // We need extra memory for stress !opts.optRepeat && // We need extra memory to repeat opts !compArenaAllocator->bypassHostAllocator() && // ArenaAllocator::getDefaultPageSize() is artificially low for // DirectAlloc // Factor of 2x is because data-structures are bigger under DEBUG (compArenaAllocator->getTotalBytesAllocated() > (2 * ArenaAllocator::getDefaultPageSize())) && // RyuJIT backend needs memory tuning! TODO-Cleanup: remove this case when memory tuning is complete. (compArenaAllocator->getTotalBytesAllocated() > (10 * ArenaAllocator::getDefaultPageSize())) && !verbose) // We allocate lots of memory to convert sets to strings for JitDump { genSmallMethodsNeedingExtraMemoryCnt++; // Less than 1% of all methods should run into this. // We cannot be more strict as there are always degenerate cases where we // would need extra memory (like huge structs as locals - see lvaSetStruct()). 
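// (The (genMethodCnt < 500) term below simply avoids firing on a small sample; once enough
// methods have been jitted, the assert enforces the "< 1%" expectation stated above.)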
assert((genMethodCnt < 500) || (genSmallMethodsNeedingExtraMemoryCnt < (genMethodCnt / 100))); } #endif // DEBUG #if defined(DEBUG) || defined(INLINE_DATA) m_inlineStrategy->DumpData(); if (JitConfig.JitInlineDumpXmlFile() != nullptr) { FILE* file = _wfopen(JitConfig.JitInlineDumpXmlFile(), W("a")); if (file != nullptr) { m_inlineStrategy->DumpXml(file); fclose(file); } else { m_inlineStrategy->DumpXml(); } } else { m_inlineStrategy->DumpXml(); } #endif #ifdef DEBUG if (opts.dspOrder) { // mdMethodDef __stdcall CEEInfo::getMethodDefFromMethod(CORINFO_METHOD_HANDLE hMethod) mdMethodDef currentMethodToken = info.compCompHnd->getMethodDefFromMethod(info.compMethodHnd); static bool headerPrinted = false; if (!headerPrinted) { // clang-format off headerPrinted = true; printf(" | Profiled | Method | Method has | calls | Num |LclV |AProp| CSE | Perf |bytes | %3s codesize| \n", Target::g_tgtCPUName); printf(" mdToken | CNT | RGN | Hash | EH | FRM | LOOP | NRM | IND | BBs | Cnt | Cnt | Cnt | Score | IL | HOT | CLD | method name \n"); printf("---------+------+------+----------+----+-----+------+-----+-----+-----+-----+-----+-----+---------+------+-------+-----+\n"); // 06001234 | 1234 | HOT | 0f1e2d3c | EH | ebp | LOOP | 15 | 6 | 12 | 17 | 12 | 8 | 1234.56 | 145 | 1234 | 123 | System.Example(int) // clang-format on } printf("%08X | ", currentMethodToken); if (fgHaveProfileData()) { if (fgCalledCount < 1000) { printf("%4.0f | ", fgCalledCount); } else if (fgCalledCount < 1000000) { printf("%3.0fK | ", fgCalledCount / 1000); } else { printf("%3.0fM | ", fgCalledCount / 1000000); } } else { printf(" | "); } CorInfoRegionKind regionKind = info.compMethodInfo->regionKind; if (opts.altJit) { printf("ALT | "); } else if (regionKind == CORINFO_REGION_NONE) { printf(" | "); } else if (regionKind == CORINFO_REGION_HOT) { printf(" HOT | "); } else if (regionKind == CORINFO_REGION_COLD) { printf("COLD | "); } else if (regionKind == CORINFO_REGION_JIT) { printf(" JIT | "); } else { printf("UNKN | "); } printf("%08x | ", info.compMethodHash()); if (compHndBBtabCount > 0) { printf("EH | "); } else { printf(" | "); } if (rpFrameType == FT_EBP_FRAME) { printf("%3s | ", STR_FPBASE); } else if (rpFrameType == FT_ESP_FRAME) { printf("%3s | ", STR_SPBASE); } #if DOUBLE_ALIGN else if (rpFrameType == FT_DOUBLE_ALIGN_FRAME) { printf("dbl | "); } #endif else // (rpFrameType == FT_NOT_SET) { printf("??? 
| "); } if (fgHasLoops) { printf("LOOP |"); } else { printf(" |"); } printf(" %3d |", optCallCount); printf(" %3d |", optIndirectCallCount); printf(" %3d |", fgBBcountAtCodegen); printf(" %3d |", lvaCount); if (opts.MinOpts()) { printf(" MinOpts |"); } else { printf(" %3d |", optAssertionCount); printf(" %3d |", optCSEcount); } if (info.compPerfScore < 9999.995) { printf(" %7.2f |", info.compPerfScore); } else { printf(" %7.0f |", info.compPerfScore); } printf(" %4d |", info.compMethodInfo->ILCodeSize); printf(" %5d |", info.compTotalHotCodeSize); printf(" %3d |", info.compTotalColdCodeSize); printf(" %s\n", eeGetMethodFullName(info.compMethodHnd)); printf(""); // in our logic this causes a flush } if (verbose) { printf("****** DONE compiling %s\n", info.compFullName); printf(""); // in our logic this causes a flush } #if TRACK_ENREG_STATS for (unsigned i = 0; i < lvaCount; ++i) { const LclVarDsc* varDsc = lvaGetDesc(i); if (varDsc->lvRefCnt() != 0) { s_enregisterStats.RecordLocal(varDsc); } } #endif // TRACK_ENREG_STATS // Only call _DbgBreakCheck when we are jitting, not when we are ngen-ing // For ngen the int3 or breakpoint instruction will be right at the // start of the ngen method and we will stop when we execute it. // if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { if (compJitHaltMethod()) { #if !defined(HOST_UNIX) // TODO-UNIX: re-enable this when we have an OS that supports a pop-up dialog // Don't do an assert, but just put up the dialog box so we get just-in-time debugger // launching. When you hit 'retry' it will continue and naturally stop at the INT 3 // that the JIT put in the code _DbgBreakCheck(__FILE__, __LINE__, "JitHalt"); #endif } } #endif // DEBUG } #ifdef PSEUDORANDOM_NOP_INSERTION // this is zlib adler32 checksum. source came from windows base #define BASE 65521L // largest prime smaller than 65536 #define NMAX 5552 // NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 #define DO1(buf, i) \ { \ s1 += buf[i]; \ s2 += s1; \ } #define DO2(buf, i) \ DO1(buf, i); \ DO1(buf, i + 1); #define DO4(buf, i) \ DO2(buf, i); \ DO2(buf, i + 2); #define DO8(buf, i) \ DO4(buf, i); \ DO4(buf, i + 4); #define DO16(buf) \ DO8(buf, 0); \ DO8(buf, 8); unsigned adler32(unsigned adler, char* buf, unsigned int len) { unsigned int s1 = adler & 0xffff; unsigned int s2 = (adler >> 16) & 0xffff; int k; if (buf == NULL) return 1L; while (len > 0) { k = len < NMAX ? 
len : NMAX; len -= k; while (k >= 16) { DO16(buf); buf += 16; k -= 16; } if (k != 0) do { s1 += *buf++; s2 += s1; } while (--k); s1 %= BASE; s2 %= BASE; } return (s2 << 16) | s1; } #endif unsigned getMethodBodyChecksum(_In_z_ char* code, int size) { #ifdef PSEUDORANDOM_NOP_INSERTION return adler32(0, code, size); #else return 0; #endif } int Compiler::compCompileHelper(CORINFO_MODULE_HANDLE classPtr, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags) { CORINFO_METHOD_HANDLE methodHnd = info.compMethodHnd; info.compCode = methodInfo->ILCode; info.compILCodeSize = methodInfo->ILCodeSize; info.compILImportSize = 0; if (info.compILCodeSize == 0) { BADCODE("code size is zero"); } if (compIsForInlining()) { #ifdef DEBUG unsigned methAttr_Old = impInlineInfo->inlineCandidateInfo->methAttr; unsigned methAttr_New = info.compCompHnd->getMethodAttribs(info.compMethodHnd); unsigned flagsToIgnore = CORINFO_FLG_DONT_INLINE | CORINFO_FLG_FORCEINLINE; assert((methAttr_Old & (~flagsToIgnore)) == (methAttr_New & (~flagsToIgnore))); #endif info.compFlags = impInlineInfo->inlineCandidateInfo->methAttr; compInlineContext = impInlineInfo->inlineContext; } else { info.compFlags = info.compCompHnd->getMethodAttribs(info.compMethodHnd); #ifdef PSEUDORANDOM_NOP_INSERTION info.compChecksum = getMethodBodyChecksum((char*)methodInfo->ILCode, methodInfo->ILCodeSize); #endif compInlineContext = m_inlineStrategy->GetRootContext(); } compSwitchedToOptimized = false; compSwitchedToMinOpts = false; // compInitOptions will set the correct verbose flag. compInitOptions(compileFlags); if (!compIsForInlining() && !opts.altJit && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALT_JIT)) { // We're an altjit, but the COMPlus_AltJit configuration did not say to compile this method, // so skip it. return CORJIT_SKIPPED; } #ifdef DEBUG if (verbose) { printf("IL to import:\n"); dumpILRange(info.compCode, info.compILCodeSize); } #endif // Check for COMPlus_AggressiveInlining if (JitConfig.JitAggressiveInlining()) { compDoAggressiveInlining = true; } if (compDoAggressiveInlining) { info.compFlags |= CORINFO_FLG_FORCEINLINE; } #ifdef DEBUG // Check for ForceInline stress. 
if (compStressCompile(STRESS_FORCE_INLINE, 0)) { info.compFlags |= CORINFO_FLG_FORCEINLINE; } if (compIsForInlining()) { JITLOG((LL_INFO100000, "\nINLINER impTokenLookupContextHandle for %s is 0x%p.\n", eeGetMethodFullName(info.compMethodHnd), dspPtr(impTokenLookupContextHandle))); } #endif // DEBUG impCanReimport = compStressCompile(STRESS_CHK_REIMPORT, 15); /* Initialize set a bunch of global values */ info.compScopeHnd = classPtr; info.compXcptnsCount = methodInfo->EHcount; info.compMaxStack = methodInfo->maxStack; compHndBBtab = nullptr; compHndBBtabCount = 0; compHndBBtabAllocCount = 0; info.compNativeCodeSize = 0; info.compTotalHotCodeSize = 0; info.compTotalColdCodeSize = 0; info.compClassProbeCount = 0; compHasBackwardJump = false; compHasBackwardJumpInHandler = false; #ifdef DEBUG compCurBB = nullptr; lvaTable = nullptr; // Reset node and block ID counter compGenTreeID = 0; compStatementID = 0; compBasicBlockID = 0; #endif /* Initialize emitter */ if (!compIsForInlining()) { codeGen->GetEmitter()->emitBegCG(this, compHnd); } info.compIsStatic = (info.compFlags & CORINFO_FLG_STATIC) != 0; info.compPublishStubParam = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PUBLISH_SECRET_PARAM); info.compHasNextCallRetAddr = false; if (opts.IsReversePInvoke()) { bool unused; info.compCallConv = info.compCompHnd->getUnmanagedCallConv(methodInfo->ftn, nullptr, &unused); info.compArgOrder = Target::g_tgtUnmanagedArgOrder; } else { info.compCallConv = CorInfoCallConvExtension::Managed; info.compArgOrder = Target::g_tgtArgOrder; } info.compIsVarArgs = false; switch (methodInfo->args.getCallConv()) { case CORINFO_CALLCONV_NATIVEVARARG: case CORINFO_CALLCONV_VARARG: info.compIsVarArgs = true; break; default: break; } info.compRetNativeType = info.compRetType = JITtype2varType(methodInfo->args.retType); info.compUnmanagedCallCountWithGCTransition = 0; info.compLvFrameListRoot = BAD_VAR_NUM; info.compInitMem = ((methodInfo->options & CORINFO_OPT_INIT_LOCALS) != 0); /* Allocate the local variable table */ lvaInitTypeRef(); compInitDebuggingInfo(); // If are an altjit and have patchpoint info, we might need to tweak the frame size // so it's plausible for the altjit architecture. // if (!info.compMatchedVM && compileFlags->IsSet(JitFlags::JIT_FLAG_OSR)) { assert(info.compLocalsCount == info.compPatchpointInfo->NumberOfLocals()); const int totalFrameSize = info.compPatchpointInfo->TotalFrameSize(); int frameSizeUpdate = 0; #if defined(TARGET_AMD64) if ((totalFrameSize % 16) != 8) { frameSizeUpdate = 8; } #elif defined(TARGET_ARM64) if ((totalFrameSize % 16) != 0) { frameSizeUpdate = 8; } #endif if (frameSizeUpdate != 0) { JITDUMP("Mismatched altjit + OSR -- updating tier0 frame size from %d to %d\n", totalFrameSize, totalFrameSize + frameSizeUpdate); // Allocate a local copy with altered frame size. // const unsigned patchpointInfoSize = PatchpointInfo::ComputeSize(info.compLocalsCount); PatchpointInfo* const newInfo = (PatchpointInfo*)getAllocator(CMK_Unknown).allocate<char>(patchpointInfoSize); newInfo->Initialize(info.compLocalsCount, totalFrameSize + frameSizeUpdate); newInfo->Copy(info.compPatchpointInfo); // Swap it in place. // info.compPatchpointInfo = newInfo; } } #ifdef DEBUG if (compIsForInlining()) { compBasicBlockID = impInlineInfo->InlinerCompiler->compBasicBlockID; } #endif const bool forceInline = !!(info.compFlags & CORINFO_FLG_FORCEINLINE); if (!compIsForInlining() && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { // We're prejitting the root method. 
We also will analyze it as // a potential inline candidate. InlineResult prejitResult(this, methodHnd, "prejit"); // Profile data allows us to avoid early "too many IL bytes" outs. prejitResult.NoteBool(InlineObservation::CALLSITE_HAS_PROFILE, fgHaveSufficientProfileData()); // Do the initial inline screen. impCanInlineIL(methodHnd, methodInfo, forceInline, &prejitResult); // Temporarily install the prejitResult as the // compInlineResult so it's available to fgFindJumpTargets // and can accumulate more observations as the IL is // scanned. // // We don't pass prejitResult in as a parameter to avoid // potential aliasing confusion -- the other call to // fgFindBasicBlocks may have set up compInlineResult and // the code in fgFindJumpTargets references that data // member extensively. assert(compInlineResult == nullptr); assert(impInlineInfo == nullptr); compInlineResult = &prejitResult; // Find the basic blocks. We must do this regardless of // inlineability, since we are prejitting this method. // // This will also update the status of this method as // an inline candidate. fgFindBasicBlocks(); // Undo the temporary setup. assert(compInlineResult == &prejitResult); compInlineResult = nullptr; // If still a viable, discretionary inline, assess // profitability. if (prejitResult.IsDiscretionaryCandidate()) { prejitResult.DetermineProfitability(methodInfo); } m_inlineStrategy->NotePrejitDecision(prejitResult); // Handle the results of the inline analysis. if (prejitResult.IsFailure()) { // This method is a bad inlinee according to our // analysis. We will let the InlineResult destructor // mark it as noinline in the prejit image to save the // jit some work. // // This decision better not be context-dependent. assert(prejitResult.IsNever()); } else { // This looks like a viable inline candidate. Since // we're not actually inlining, don't report anything. prejitResult.SetReported(); } } else { // We are jitting the root method, or inlining. fgFindBasicBlocks(); // If we are doing OSR, update flow to initially reach the appropriate IL offset. // if (opts.IsOSR()) { fgFixEntryFlowForOSR(); } } // If we're inlining and the candidate is bad, bail out. if (compDonotInline()) { goto _Next; } // We may decide to optimize this method, // to avoid spending a long time stuck in Tier0 code. // if (fgCanSwitchToOptimized()) { // We only expect to be able to do this at Tier0. // assert(opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0)); // Normal tiering should bail us out of Tier0 tail call induced loops. // So keep these methods in Tier0 if we're gathering PGO data. // If we're not gathering PGO, then switch these to optimized to // minimize the number of tail call helper stubs we might need. // Reconsider this if/when we're able to share those stubs. // // Honor the config setting that tells the jit to // always optimize methods with loops. // // If neither of those apply, and OSR is enabled, the jit may still // decide to optimize, if there's something in the method that // OSR currently cannot handle, or we're optionally suppressing // OSR by method hash. 
// const char* reason = nullptr; if (compTailPrefixSeen && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)) { reason = "tail.call and not BBINSTR"; } else if (compHasBackwardJump && ((info.compFlags & CORINFO_FLG_DISABLE_TIER0_FOR_LOOPS) != 0)) { reason = "loop"; } if (compHasBackwardJump && (reason == nullptr) && (JitConfig.TC_OnStackReplacement() > 0)) { const char* noPatchpointReason = nullptr; bool canEscapeViaOSR = compCanHavePatchpoints(&reason); #ifdef DEBUG if (canEscapeViaOSR) { // Optionally disable OSR by method hash. This will force any // method that might otherwise get trapped in Tier0 to be optimized. // static ConfigMethodRange JitEnableOsrRange; JitEnableOsrRange.EnsureInit(JitConfig.JitEnableOsrRange()); const unsigned hash = impInlineRoot()->info.compMethodHash(); if (!JitEnableOsrRange.Contains(hash)) { canEscapeViaOSR = false; reason = "OSR disabled by JitEnableOsrRange"; } } #endif if (canEscapeViaOSR) { JITDUMP("\nOSR enabled for this method\n"); } else { JITDUMP("\nOSR disabled for this method: %s\n", noPatchpointReason); assert(reason != nullptr); } } if (reason != nullptr) { fgSwitchToOptimized(reason); } } compSetOptimizationLevel(); #if COUNT_BASIC_BLOCKS bbCntTable.record(fgBBcount); if (fgBBcount == 1) { bbOneBBSizeTable.record(methodInfo->ILCodeSize); } #endif // COUNT_BASIC_BLOCKS #ifdef DEBUG if (verbose) { printf("Basic block list for '%s'\n", info.compFullName); fgDispBasicBlocks(); } #endif #ifdef DEBUG /* Give the function a unique number */ if (opts.disAsm || verbose) { compMethodID = ~info.compMethodHash() & 0xffff; } else { compMethodID = InterlockedIncrement(&s_compMethodsCount); } #endif if (compIsForInlining()) { compInlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_BASIC_BLOCKS, fgBBcount); if (compInlineResult->IsFailure()) { goto _Next; } } #ifdef DEBUG if ((JitConfig.DumpJittedMethods() == 1) && !compIsForInlining()) { enum { BUFSIZE = 20 }; char osrBuffer[BUFSIZE] = {0}; if (opts.IsOSR()) { // Tiering name already includes "OSR", we just want the IL offset // sprintf_s(osrBuffer, BUFSIZE, " @0x%x", info.compILEntry); } printf("Compiling %4d %s::%s, IL size = %u, hash=0x%08x %s%s%s\n", Compiler::jitTotalMethodCompiled, info.compClassName, info.compMethodName, info.compILCodeSize, info.compMethodHash(), compGetTieringName(), osrBuffer, compGetStressMessage()); } if (compIsForInlining()) { compGenTreeID = impInlineInfo->InlinerCompiler->compGenTreeID; compStatementID = impInlineInfo->InlinerCompiler->compStatementID; } #endif compCompile(methodCodePtr, methodCodeSize, compileFlags); #ifdef DEBUG if (compIsForInlining()) { impInlineInfo->InlinerCompiler->compGenTreeID = compGenTreeID; impInlineInfo->InlinerCompiler->compStatementID = compStatementID; impInlineInfo->InlinerCompiler->compBasicBlockID = compBasicBlockID; } #endif _Next: if (compDonotInline()) { // Verify we have only one inline result in play. assert(impInlineInfo->inlineResult == compInlineResult); } if (!compIsForInlining()) { compCompileFinish(); // Did we just compile for a target architecture that the VM isn't expecting? If so, the VM // can't used the generated code (and we better be an AltJit!). if (!info.compMatchedVM) { return CORJIT_SKIPPED; } #ifdef DEBUG if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALT_JIT) && JitConfig.RunAltJitCode() == 0) { return CORJIT_SKIPPED; } #endif // DEBUG } /* Success! 
*/ return CORJIT_OK; } //------------------------------------------------------------------------ // compFindLocalVarLinear: Linear search for variable's scope containing offset. // // Arguments: // varNum The variable number to search for in the array of scopes. // offs The offset value which should occur within the life of the variable. // // Return Value: // VarScopeDsc* of a matching variable that contains the offset within its life // begin and life end or nullptr when there is no match found. // // Description: // Linear search for matching variables with their life begin and end containing // the offset. // or NULL if one couldn't be found. // // Note: // Usually called for scope count = 4. Could be called for values upto 8. // VarScopeDsc* Compiler::compFindLocalVarLinear(unsigned varNum, unsigned offs) { for (unsigned i = 0; i < info.compVarScopesCount; i++) { VarScopeDsc* dsc = &info.compVarScopes[i]; if ((dsc->vsdVarNum == varNum) && (dsc->vsdLifeBeg <= offs) && (dsc->vsdLifeEnd > offs)) { return dsc; } } return nullptr; } //------------------------------------------------------------------------ // compFindLocalVar: Search for variable's scope containing offset. // // Arguments: // varNum The variable number to search for in the array of scopes. // offs The offset value which should occur within the life of the variable. // // Return Value: // VarScopeDsc* of a matching variable that contains the offset within its life // begin and life end. // or NULL if one couldn't be found. // // Description: // Linear search for matching variables with their life begin and end containing // the offset only when the scope count is < MAX_LINEAR_FIND_LCL_SCOPELIST, // else use the hashtable lookup. // VarScopeDsc* Compiler::compFindLocalVar(unsigned varNum, unsigned offs) { if (info.compVarScopesCount < MAX_LINEAR_FIND_LCL_SCOPELIST) { return compFindLocalVarLinear(varNum, offs); } else { VarScopeDsc* ret = compFindLocalVar(varNum, offs, offs); assert(ret == compFindLocalVarLinear(varNum, offs)); return ret; } } //------------------------------------------------------------------------ // compFindLocalVar: Search for variable's scope containing offset. // // Arguments: // varNum The variable number to search for in the array of scopes. // lifeBeg The life begin of the variable's scope // lifeEnd The life end of the variable's scope // // Return Value: // VarScopeDsc* of a matching variable that contains the offset within its life // begin and life end, or NULL if one couldn't be found. // // Description: // Following are the steps used: // 1. Index into the hashtable using varNum. // 2. Iterate through the linked list at index varNum to find a matching // var scope. // VarScopeDsc* Compiler::compFindLocalVar(unsigned varNum, unsigned lifeBeg, unsigned lifeEnd) { assert(compVarScopeMap != nullptr); VarScopeMapInfo* info; if (compVarScopeMap->Lookup(varNum, &info)) { VarScopeListNode* list = info->head; while (list != nullptr) { if ((list->data->vsdLifeBeg <= lifeBeg) && (list->data->vsdLifeEnd > lifeEnd)) { return list->data; } list = list->next; } } return nullptr; } //------------------------------------------------------------------------- // compInitVarScopeMap: Create a scope map so it can be looked up by varNum // // Description: // Map.K => Map.V :: varNum => List(ScopeDsc) // // Create a scope map that can be indexed by varNum and can be iterated // on it's values to look for matching scope when given an offs or // lifeBeg and lifeEnd. // // Notes: // 1. 
Build the map only when we think linear search is slow, i.e., // MAX_LINEAR_FIND_LCL_SCOPELIST is large. // 2. Linked list preserves original array order. // void Compiler::compInitVarScopeMap() { if (info.compVarScopesCount < MAX_LINEAR_FIND_LCL_SCOPELIST) { return; } assert(compVarScopeMap == nullptr); compVarScopeMap = new (getAllocator()) VarNumToScopeDscMap(getAllocator()); // 599 prime to limit huge allocations; for ex: duplicated scopes on single var. compVarScopeMap->Reallocate(min(info.compVarScopesCount, 599)); for (unsigned i = 0; i < info.compVarScopesCount; ++i) { unsigned varNum = info.compVarScopes[i].vsdVarNum; VarScopeListNode* node = VarScopeListNode::Create(&info.compVarScopes[i], getAllocator()); // Index by varNum and if the list exists append "node" to the "list". VarScopeMapInfo* info; if (compVarScopeMap->Lookup(varNum, &info)) { info->tail->next = node; info->tail = node; } // Create a new list. else { info = VarScopeMapInfo::Create(node, getAllocator()); compVarScopeMap->Set(varNum, info); } } } struct genCmpLocalVarLifeBeg { bool operator()(const VarScopeDsc* elem1, const VarScopeDsc* elem2) { return elem1->vsdLifeBeg < elem2->vsdLifeBeg; } }; struct genCmpLocalVarLifeEnd { bool operator()(const VarScopeDsc* elem1, const VarScopeDsc* elem2) { return elem1->vsdLifeEnd < elem2->vsdLifeEnd; } }; inline void Compiler::compInitScopeLists() { if (info.compVarScopesCount == 0) { compEnterScopeList = compExitScopeList = nullptr; return; } // Populate the 'compEnterScopeList' and 'compExitScopeList' lists compEnterScopeList = new (this, CMK_DebugInfo) VarScopeDsc*[info.compVarScopesCount]; compExitScopeList = new (this, CMK_DebugInfo) VarScopeDsc*[info.compVarScopesCount]; for (unsigned i = 0; i < info.compVarScopesCount; i++) { compEnterScopeList[i] = compExitScopeList[i] = &info.compVarScopes[i]; } jitstd::sort(compEnterScopeList, compEnterScopeList + info.compVarScopesCount, genCmpLocalVarLifeBeg()); jitstd::sort(compExitScopeList, compExitScopeList + info.compVarScopesCount, genCmpLocalVarLifeEnd()); } void Compiler::compResetScopeLists() { if (info.compVarScopesCount == 0) { return; } assert(compEnterScopeList && compExitScopeList); compNextEnterScope = compNextExitScope = 0; } VarScopeDsc* Compiler::compGetNextEnterScope(unsigned offs, bool scan) { assert(info.compVarScopesCount); assert(compEnterScopeList && compExitScopeList); if (compNextEnterScope < info.compVarScopesCount) { assert(compEnterScopeList[compNextEnterScope]); unsigned nextEnterOff = compEnterScopeList[compNextEnterScope]->vsdLifeBeg; assert(scan || (offs <= nextEnterOff)); if (!scan) { if (offs == nextEnterOff) { return compEnterScopeList[compNextEnterScope++]; } } else { if (nextEnterOff <= offs) { return compEnterScopeList[compNextEnterScope++]; } } } return nullptr; } VarScopeDsc* Compiler::compGetNextExitScope(unsigned offs, bool scan) { assert(info.compVarScopesCount); assert(compEnterScopeList && compExitScopeList); if (compNextExitScope < info.compVarScopesCount) { assert(compExitScopeList[compNextExitScope]); unsigned nextExitOffs = compExitScopeList[compNextExitScope]->vsdLifeEnd; assert(scan || (offs <= nextExitOffs)); if (!scan) { if (offs == nextExitOffs) { return compExitScopeList[compNextExitScope++]; } } else { if (nextExitOffs <= offs) { return compExitScopeList[compNextExitScope++]; } } } return nullptr; } // The function will call the callback functions for scopes with boundaries // at instrs from the current status of the scope lists to 'offset', // ordered by instrs. 
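// Illustrative usage sketch (hypothetical callback names, not taken from a real caller):
//
//   compResetScopeLists();
//   for each block, in increasing IL-offset order:
//       compProcessScopesUntil(blockStartOffset, &inScope,
//                              &Compiler::MyEnterScopeFn, &Compiler::MyExitScopeFn);
//
// where both callbacks have the signature void (Compiler::*)(VARSET_TP*, VarScopeDsc*),
// matching the declaration of compProcessScopesUntil just below.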
void Compiler::compProcessScopesUntil(unsigned offset, VARSET_TP* inScope, void (Compiler::*enterScopeFn)(VARSET_TP* inScope, VarScopeDsc*), void (Compiler::*exitScopeFn)(VARSET_TP* inScope, VarScopeDsc*)) { assert(offset != BAD_IL_OFFSET); assert(inScope != nullptr); bool foundExit = false, foundEnter = true; VarScopeDsc* scope; VarScopeDsc* nextExitScope = nullptr; VarScopeDsc* nextEnterScope = nullptr; unsigned offs = offset, curEnterOffs = 0; goto START_FINDING_SCOPES; // We need to determine the scopes which are open for the current block. // This loop walks over the missing blocks between the current and the // previous block, keeping the enter and exit offsets in lockstep. do { foundExit = foundEnter = false; if (nextExitScope) { (this->*exitScopeFn)(inScope, nextExitScope); nextExitScope = nullptr; foundExit = true; } offs = nextEnterScope ? nextEnterScope->vsdLifeBeg : offset; while ((scope = compGetNextExitScope(offs, true)) != nullptr) { foundExit = true; if (!nextEnterScope || scope->vsdLifeEnd > nextEnterScope->vsdLifeBeg) { // We overshot the last found Enter scope. Save the scope for later // and find an entering scope nextExitScope = scope; break; } (this->*exitScopeFn)(inScope, scope); } if (nextEnterScope) { (this->*enterScopeFn)(inScope, nextEnterScope); curEnterOffs = nextEnterScope->vsdLifeBeg; nextEnterScope = nullptr; foundEnter = true; } offs = nextExitScope ? nextExitScope->vsdLifeEnd : offset; START_FINDING_SCOPES: while ((scope = compGetNextEnterScope(offs, true)) != nullptr) { foundEnter = true; if ((nextExitScope && scope->vsdLifeBeg >= nextExitScope->vsdLifeEnd) || (scope->vsdLifeBeg > curEnterOffs)) { // We overshot the last found exit scope. Save the scope for later // and find an exiting scope nextEnterScope = scope; break; } (this->*enterScopeFn)(inScope, scope); if (!nextExitScope) { curEnterOffs = scope->vsdLifeBeg; } } } while (foundExit || foundEnter); } #if defined(DEBUG) void Compiler::compDispScopeLists() { unsigned i; printf("Local variable scopes = %d\n", info.compVarScopesCount); if (info.compVarScopesCount) { printf(" \tVarNum \tLVNum \t Name \tBeg \tEnd\n"); } printf("Sorted by enter scope:\n"); for (i = 0; i < info.compVarScopesCount; i++) { VarScopeDsc* varScope = compEnterScopeList[i]; assert(varScope); printf("%2d: \t%02Xh \t%02Xh \t%10s \t%03Xh \t%03Xh", i, varScope->vsdVarNum, varScope->vsdLVnum, VarNameToStr(varScope->vsdName) == nullptr ? "UNKNOWN" : VarNameToStr(varScope->vsdName), varScope->vsdLifeBeg, varScope->vsdLifeEnd); if (compNextEnterScope == i) { printf(" <-- next enter scope"); } printf("\n"); } printf("Sorted by exit scope:\n"); for (i = 0; i < info.compVarScopesCount; i++) { VarScopeDsc* varScope = compExitScopeList[i]; assert(varScope); printf("%2d: \t%02Xh \t%02Xh \t%10s \t%03Xh \t%03Xh", i, varScope->vsdVarNum, varScope->vsdLVnum, VarNameToStr(varScope->vsdName) == nullptr ? "UNKNOWN" : VarNameToStr(varScope->vsdName), varScope->vsdLifeBeg, varScope->vsdLifeEnd); if (compNextExitScope == i) { printf(" <-- next exit scope"); } printf("\n"); } } void Compiler::compDispLocalVars() { printf("info.compVarScopesCount = %d\n", info.compVarScopesCount); if (info.compVarScopesCount > 0) { printf(" \tVarNum \tLVNum \t Name \tBeg \tEnd\n"); } for (unsigned i = 0; i < info.compVarScopesCount; i++) { VarScopeDsc* varScope = &info.compVarScopes[i]; printf("%2d: \t%02Xh \t%02Xh \t%10s \t%03Xh \t%03Xh\n", i, varScope->vsdVarNum, varScope->vsdLVnum, VarNameToStr(varScope->vsdName) == nullptr ? 
"UNKNOWN" : VarNameToStr(varScope->vsdName), varScope->vsdLifeBeg, varScope->vsdLifeEnd); } } #endif // DEBUG /*****************************************************************************/ #if MEASURE_CLRAPI_CALLS struct WrapICorJitInfo : public ICorJitInfo { //------------------------------------------------------------------------ // WrapICorJitInfo::makeOne: allocate an instance of WrapICorJitInfo // // Arguments: // alloc - the allocator to get memory from for the instance // compile - the compiler instance // compHndRef - the ICorJitInfo handle from the EE; the caller's // copy may be replaced with a "wrapper" instance // // Return Value: // If the config flags indicate that ICorJitInfo should be wrapped, // we return the "wrapper" instance; otherwise we return "nullptr". static WrapICorJitInfo* makeOne(ArenaAllocator* alloc, Compiler* compiler, COMP_HANDLE& compHndRef /* INOUT */) { WrapICorJitInfo* wrap = nullptr; if (JitConfig.JitEECallTimingInfo() != 0) { // It's too early to use the default allocator, so we do this // in two steps to be safe (the constructor doesn't need to do // anything except fill in the vtable pointer, so we let the // compiler do it). void* inst = alloc->allocateMemory(roundUp(sizeof(WrapICorJitInfo))); if (inst != nullptr) { // If you get a build error here due to 'WrapICorJitInfo' being // an abstract class, it's very likely that the wrapper bodies // in ICorJitInfo_API_wrapper.hpp are no longer in sync with // the EE interface; please be kind and update the header file. wrap = new (inst, jitstd::placement_t()) WrapICorJitInfo(); wrap->wrapComp = compiler; // Save the real handle and replace it with our wrapped version. wrap->wrapHnd = compHndRef; compHndRef = wrap; } } return wrap; } private: Compiler* wrapComp; COMP_HANDLE wrapHnd; // the "real thing" public: #include "ICorJitInfo_API_wrapper.hpp" }; #endif // MEASURE_CLRAPI_CALLS /*****************************************************************************/ // Compile a single method int jitNativeCode(CORINFO_METHOD_HANDLE methodHnd, CORINFO_MODULE_HANDLE classPtr, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags, void* inlineInfoPtr) { // // A non-NULL inlineInfo means we are compiling the inlinee method. // InlineInfo* inlineInfo = (InlineInfo*)inlineInfoPtr; bool jitFallbackCompile = false; START: int result = CORJIT_INTERNALERROR; ArenaAllocator* pAlloc = nullptr; ArenaAllocator alloc; #if MEASURE_CLRAPI_CALLS WrapICorJitInfo* wrapCLR = nullptr; #endif if (inlineInfo) { // Use inliner's memory allocator when compiling the inlinee. 
pAlloc = inlineInfo->InlinerCompiler->compGetArenaAllocator(); } else { pAlloc = &alloc; } Compiler* pComp; pComp = nullptr; struct Param { Compiler* pComp; ArenaAllocator* pAlloc; bool jitFallbackCompile; CORINFO_METHOD_HANDLE methodHnd; CORINFO_MODULE_HANDLE classPtr; COMP_HANDLE compHnd; CORINFO_METHOD_INFO* methodInfo; void** methodCodePtr; uint32_t* methodCodeSize; JitFlags* compileFlags; InlineInfo* inlineInfo; #if MEASURE_CLRAPI_CALLS WrapICorJitInfo* wrapCLR; #endif int result; } param; param.pComp = nullptr; param.pAlloc = pAlloc; param.jitFallbackCompile = jitFallbackCompile; param.methodHnd = methodHnd; param.classPtr = classPtr; param.compHnd = compHnd; param.methodInfo = methodInfo; param.methodCodePtr = methodCodePtr; param.methodCodeSize = methodCodeSize; param.compileFlags = compileFlags; param.inlineInfo = inlineInfo; #if MEASURE_CLRAPI_CALLS param.wrapCLR = nullptr; #endif param.result = result; setErrorTrap(compHnd, Param*, pParamOuter, &param) { setErrorTrap(nullptr, Param*, pParam, pParamOuter) { if (pParam->inlineInfo) { // Lazily create the inlinee compiler object if (pParam->inlineInfo->InlinerCompiler->InlineeCompiler == nullptr) { pParam->inlineInfo->InlinerCompiler->InlineeCompiler = (Compiler*)pParam->pAlloc->allocateMemory(roundUp(sizeof(*pParam->pComp))); } // Use the inlinee compiler object pParam->pComp = pParam->inlineInfo->InlinerCompiler->InlineeCompiler; #ifdef DEBUG // memset(pParam->pComp, 0xEE, sizeof(Compiler)); #endif } else { // Allocate create the inliner compiler object pParam->pComp = (Compiler*)pParam->pAlloc->allocateMemory(roundUp(sizeof(*pParam->pComp))); } #if MEASURE_CLRAPI_CALLS pParam->wrapCLR = WrapICorJitInfo::makeOne(pParam->pAlloc, pParam->pComp, pParam->compHnd); #endif // push this compiler on the stack (TLS) pParam->pComp->prevCompiler = JitTls::GetCompiler(); JitTls::SetCompiler(pParam->pComp); // PREFIX_ASSUME gets turned into ASSERT_CHECK and we cannot have it here #if defined(_PREFAST_) || defined(_PREFIX_) PREFIX_ASSUME(pParam->pComp != NULL); #else assert(pParam->pComp != nullptr); #endif pParam->pComp->compInit(pParam->pAlloc, pParam->methodHnd, pParam->compHnd, pParam->methodInfo, pParam->inlineInfo); #ifdef DEBUG pParam->pComp->jitFallbackCompile = pParam->jitFallbackCompile; #endif // Now generate the code pParam->result = pParam->pComp->compCompile(pParam->classPtr, pParam->methodCodePtr, pParam->methodCodeSize, pParam->compileFlags); } finallyErrorTrap() { Compiler* pCompiler = pParamOuter->pComp; // If OOM is thrown when allocating memory for a pComp, we will end up here. // For this case, pComp and also pCompiler will be a nullptr // if (pCompiler != nullptr) { pCompiler->info.compCode = nullptr; // pop the compiler off the TLS stack only if it was linked above assert(JitTls::GetCompiler() == pCompiler); JitTls::SetCompiler(pCompiler->prevCompiler); } if (pParamOuter->inlineInfo == nullptr) { // Free up the allocator we were using pParamOuter->pAlloc->destroy(); } } endErrorTrap() } impJitErrorTrap() { // If we were looking at an inlinee.... if (inlineInfo != nullptr) { // Note that we failed to compile the inlinee, and that // there's no point trying to inline it again anywhere else. 
inlineInfo->inlineResult->NoteFatal(InlineObservation::CALLEE_COMPILATION_ERROR); } param.result = __errc; } endErrorTrap() result = param.result; if (!inlineInfo && (result == CORJIT_INTERNALERROR || result == CORJIT_RECOVERABLEERROR || result == CORJIT_IMPLLIMITATION) && !jitFallbackCompile) { // If we failed the JIT, reattempt with debuggable code. jitFallbackCompile = true; // Update the flags for 'safer' code generation. compileFlags->Set(JitFlags::JIT_FLAG_MIN_OPT); compileFlags->Clear(JitFlags::JIT_FLAG_SIZE_OPT); compileFlags->Clear(JitFlags::JIT_FLAG_SPEED_OPT); goto START; } return result; } #if defined(UNIX_AMD64_ABI) // GetTypeFromClassificationAndSizes: // Returns the type of the eightbyte accounting for the classification and size of the eightbyte. // // args: // classType: classification type // size: size of the eightbyte. // // static var_types Compiler::GetTypeFromClassificationAndSizes(SystemVClassificationType classType, int size) { var_types type = TYP_UNKNOWN; switch (classType) { case SystemVClassificationTypeInteger: if (size == 1) { type = TYP_BYTE; } else if (size <= 2) { type = TYP_SHORT; } else if (size <= 4) { type = TYP_INT; } else if (size <= 8) { type = TYP_LONG; } else { assert(false && "GetTypeFromClassificationAndSizes Invalid Integer classification type."); } break; case SystemVClassificationTypeIntegerReference: type = TYP_REF; break; case SystemVClassificationTypeIntegerByRef: type = TYP_BYREF; break; case SystemVClassificationTypeSSE: if (size <= 4) { type = TYP_FLOAT; } else if (size <= 8) { type = TYP_DOUBLE; } else { assert(false && "GetTypeFromClassificationAndSizes Invalid SSE classification type."); } break; default: assert(false && "GetTypeFromClassificationAndSizes Invalid classification type."); break; } return type; } //------------------------------------------------------------------- // GetEightByteType: Returns the type of eightbyte slot of a struct // // Arguments: // structDesc - struct classification description. // slotNum - eightbyte slot number for the struct. // // Return Value: // type of the eightbyte slot of the struct // // static var_types Compiler::GetEightByteType(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, unsigned slotNum) { var_types eightByteType = TYP_UNDEF; unsigned len = structDesc.eightByteSizes[slotNum]; switch (structDesc.eightByteClassifications[slotNum]) { case SystemVClassificationTypeInteger: // See typelist.h for jit type definition. // All the types of size < 4 bytes are of jit type TYP_INT. 
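// For example (illustrative): a struct whose second eightbyte holds a single 'int' field
// yields an Integer eightbyte of size 4 -> TYP_INT here, while a struct of two doubles
// classifies both eightbytes as SSE of size 8 -> TYP_DOUBLE in the SSE case below.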
if (structDesc.eightByteSizes[slotNum] <= 4) { eightByteType = TYP_INT; } else if (structDesc.eightByteSizes[slotNum] <= 8) { eightByteType = TYP_LONG; } else { assert(false && "GetEightByteType Invalid Integer classification type."); } break; case SystemVClassificationTypeIntegerReference: assert(len == REGSIZE_BYTES); eightByteType = TYP_REF; break; case SystemVClassificationTypeIntegerByRef: assert(len == REGSIZE_BYTES); eightByteType = TYP_BYREF; break; case SystemVClassificationTypeSSE: if (structDesc.eightByteSizes[slotNum] <= 4) { eightByteType = TYP_FLOAT; } else if (structDesc.eightByteSizes[slotNum] <= 8) { eightByteType = TYP_DOUBLE; } else { assert(false && "GetEightByteType Invalid SSE classification type."); } break; default: assert(false && "GetEightByteType Invalid classification type."); break; } return eightByteType; } //------------------------------------------------------------------------------------------------------ // GetStructTypeOffset: Gets the type, size and offset of the eightbytes of a struct for System V systems. // // Arguments: // 'structDesc' - struct description // 'type0' - out param; returns the type of the first eightbyte. // 'type1' - out param; returns the type of the second eightbyte. // 'offset0' - out param; returns the offset of the first eightbyte. // 'offset1' - out param; returns the offset of the second eightbyte. // // static void Compiler::GetStructTypeOffset(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, var_types* type0, var_types* type1, unsigned __int8* offset0, unsigned __int8* offset1) { *offset0 = structDesc.eightByteOffsets[0]; *offset1 = structDesc.eightByteOffsets[1]; *type0 = TYP_UNKNOWN; *type1 = TYP_UNKNOWN; // Set the first eightbyte data if (structDesc.eightByteCount >= 1) { *type0 = GetEightByteType(structDesc, 0); } // Set the second eight byte data if (structDesc.eightByteCount == 2) { *type1 = GetEightByteType(structDesc, 1); } } //------------------------------------------------------------------------------------------------------ // GetStructTypeOffset: Gets the type, size and offset of the eightbytes of a struct for System V systems. // // Arguments: // 'typeHnd' - type handle // 'type0' - out param; returns the type of the first eightbyte. // 'type1' - out param; returns the type of the second eightbyte. // 'offset0' - out param; returns the offset of the first eightbyte. // 'offset1' - out param; returns the offset of the second eightbyte. // void Compiler::GetStructTypeOffset(CORINFO_CLASS_HANDLE typeHnd, var_types* type0, var_types* type1, unsigned __int8* offset0, unsigned __int8* offset1) { SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; eeGetSystemVAmd64PassStructInRegisterDescriptor(typeHnd, &structDesc); assert(structDesc.passedInRegisters); GetStructTypeOffset(structDesc, type0, type1, offset0, offset1); } #endif // defined(UNIX_AMD64_ABI) /*****************************************************************************/ /*****************************************************************************/ #ifdef DEBUG Compiler::NodeToIntMap* Compiler::FindReachableNodesInNodeTestData() { NodeToIntMap* reachable = new (getAllocatorDebugOnly()) NodeToIntMap(getAllocatorDebugOnly()); if (m_nodeTestData == nullptr) { return reachable; } // Otherwise, iterate. 
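// Walk every statement of every block in linked-tree order; for GT_CALL nodes the
// corresponding late-arg nodes are looked up as well, so annotations attached to a
// late arg are also treated as reachable.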
for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->NonPhiStatements()) { for (GenTree* const tree : stmt->TreeList()) { TestLabelAndNum tlAndN; // For call nodes, translate late args to what they stand for. if (tree->OperGet() == GT_CALL) { GenTreeCall* call = tree->AsCall(); unsigned i = 0; for (GenTreeCall::Use& use : call->Args()) { if ((use.GetNode()->gtFlags & GTF_LATE_ARG) != 0) { // Find the corresponding late arg. GenTree* lateArg = call->fgArgInfo->GetArgNode(i); if (GetNodeTestData()->Lookup(lateArg, &tlAndN)) { reachable->Set(lateArg, 0); } } i++; } } if (GetNodeTestData()->Lookup(tree, &tlAndN)) { reachable->Set(tree, 0); } } } } return reachable; } void Compiler::TransferTestDataToNode(GenTree* from, GenTree* to) { TestLabelAndNum tlAndN; // We can't currently associate multiple annotations with a single node. // If we need to, we can fix this... // If the table is null, don't create it just to do the lookup, which would fail... if (m_nodeTestData != nullptr && GetNodeTestData()->Lookup(from, &tlAndN)) { assert(!GetNodeTestData()->Lookup(to, &tlAndN)); // We can't currently associate multiple annotations with a single node. // If we need to, we can fix this... TestLabelAndNum tlAndNTo; assert(!GetNodeTestData()->Lookup(to, &tlAndNTo)); GetNodeTestData()->Remove(from); GetNodeTestData()->Set(to, tlAndN); } } #endif // DEBUG /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX jvc XX XX XX XX Functions for the stand-alone version of the JIT . XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ void codeGeneratorCodeSizeBeg() { } /***************************************************************************** * * Used for counting pointer assignments. 
*/ /*****************************************************************************/ void codeGeneratorCodeSizeEnd() { } /***************************************************************************** * * Gather statistics - mainly used for the standalone * Enable various #ifdef's to get the information you need */ void Compiler::compJitStats() { #if CALL_ARG_STATS /* Method types and argument statistics */ compCallArgStats(); #endif // CALL_ARG_STATS } #if CALL_ARG_STATS /***************************************************************************** * * Gather statistics about method calls and arguments */ void Compiler::compCallArgStats() { unsigned argNum; unsigned argDWordNum; unsigned argLngNum; unsigned argFltNum; unsigned argDblNum; unsigned regArgNum; unsigned regArgDeferred; unsigned regArgTemp; unsigned regArgLclVar; unsigned regArgConst; unsigned argTempsThisMethod = 0; assert(fgStmtListThreaded); for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->Statements()) { for (GenTree* const call : stmt->TreeList()) { if (call->gtOper != GT_CALL) continue; argNum = regArgNum = regArgDeferred = regArgTemp = regArgConst = regArgLclVar = argDWordNum = argLngNum = argFltNum = argDblNum = 0; argTotalCalls++; if (call->AsCall()->gtCallThisArg == nullptr) { if (call->AsCall()->gtCallType == CT_HELPER) { argHelperCalls++; } else { argStaticCalls++; } } else { /* We have a 'this' pointer */ argDWordNum++; argNum++; regArgNum++; regArgDeferred++; argTotalObjPtr++; if (call->AsCall()->IsVirtual()) { /* virtual function */ argVirtualCalls++; } else { argNonVirtualCalls++; } } } } } argTempsCntTable.record(argTempsThisMethod); if (argMaxTempsPerMethod < argTempsThisMethod) { argMaxTempsPerMethod = argTempsThisMethod; } } /* static */ void Compiler::compDispCallArgStats(FILE* fout) { if (argTotalCalls == 0) return; fprintf(fout, "\n"); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Call stats\n"); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Total # of calls = %d, calls / method = %.3f\n\n", argTotalCalls, (float)argTotalCalls / genMethodCnt); fprintf(fout, "Percentage of helper calls = %4.2f %%\n", (float)(100 * argHelperCalls) / argTotalCalls); fprintf(fout, "Percentage of static calls = %4.2f %%\n", (float)(100 * argStaticCalls) / argTotalCalls); fprintf(fout, "Percentage of virtual calls = %4.2f %%\n", (float)(100 * argVirtualCalls) / argTotalCalls); fprintf(fout, "Percentage of non-virtual calls = %4.2f %%\n\n", (float)(100 * argNonVirtualCalls) / argTotalCalls); fprintf(fout, "Average # of arguments per call = %.2f%%\n\n", (float)argTotalArgs / argTotalCalls); fprintf(fout, "Percentage of DWORD arguments = %.2f %%\n", (float)(100 * argTotalDWordArgs) / argTotalArgs); fprintf(fout, "Percentage of LONG arguments = %.2f %%\n", (float)(100 * argTotalLongArgs) / argTotalArgs); fprintf(fout, "Percentage of FLOAT arguments = %.2f %%\n", (float)(100 * argTotalFloatArgs) / argTotalArgs); fprintf(fout, "Percentage of DOUBLE arguments = %.2f %%\n\n", (float)(100 * argTotalDoubleArgs) / argTotalArgs); if (argTotalRegArgs == 0) return; /* fprintf(fout, "Total deferred arguments = %d \n", argTotalDeferred); fprintf(fout, "Total temp arguments = %d \n\n", argTotalTemps); fprintf(fout, "Total 'this' arguments = %d \n", argTotalObjPtr); fprintf(fout, "Total local var arguments = %d \n", argTotalLclVar); fprintf(fout, "Total constant arguments = %d \n\n", argTotalConst); */ fprintf(fout, "\nRegister 
Arguments:\n\n"); fprintf(fout, "Percentage of deferred arguments = %.2f %%\n", (float)(100 * argTotalDeferred) / argTotalRegArgs); fprintf(fout, "Percentage of temp arguments = %.2f %%\n\n", (float)(100 * argTotalTemps) / argTotalRegArgs); fprintf(fout, "Maximum # of temps per method = %d\n\n", argMaxTempsPerMethod); fprintf(fout, "Percentage of ObjPtr arguments = %.2f %%\n", (float)(100 * argTotalObjPtr) / argTotalRegArgs); // fprintf(fout, "Percentage of global arguments = %.2f %%\n", (float)(100 * argTotalDWordGlobEf) / // argTotalRegArgs); fprintf(fout, "Percentage of constant arguments = %.2f %%\n", (float)(100 * argTotalConst) / argTotalRegArgs); fprintf(fout, "Percentage of lcl var arguments = %.2f %%\n\n", (float)(100 * argTotalLclVar) / argTotalRegArgs); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Argument count frequency table (includes ObjPtr):\n"); fprintf(fout, "--------------------------------------------------\n"); argCntTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "DWORD argument count frequency table (w/o LONG):\n"); fprintf(fout, "--------------------------------------------------\n"); argDWordCntTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Temps count frequency table (per method):\n"); fprintf(fout, "--------------------------------------------------\n"); argTempsCntTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); /* fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "DWORD argument count frequency table (w/ LONG):\n"); fprintf(fout, "--------------------------------------------------\n"); argDWordLngCntTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); */ } #endif // CALL_ARG_STATS // JIT time end to end, and by phases. 
#ifdef FEATURE_JIT_METHOD_PERF // Static variables CritSecObject CompTimeSummaryInfo::s_compTimeSummaryLock; CompTimeSummaryInfo CompTimeSummaryInfo::s_compTimeSummary; #if MEASURE_CLRAPI_CALLS double JitTimer::s_cyclesPerSec = CachedCyclesPerSecond(); #endif #endif // FEATURE_JIT_METHOD_PERF #if defined(FEATURE_JIT_METHOD_PERF) || DUMP_FLOWGRAPHS || defined(FEATURE_TRACELOGGING) const char* PhaseNames[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) string_nm, #include "compphases.h" }; const char* PhaseEnums[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) #enum_nm, #include "compphases.h" }; const LPCWSTR PhaseShortNames[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) W(short_nm), #include "compphases.h" }; #endif // defined(FEATURE_JIT_METHOD_PERF) || DUMP_FLOWGRAPHS #ifdef FEATURE_JIT_METHOD_PERF bool PhaseHasChildren[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) hasChildren, #include "compphases.h" }; int PhaseParent[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) parent, #include "compphases.h" }; bool PhaseReportsIRSize[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) measureIR, #include "compphases.h" }; CompTimeInfo::CompTimeInfo(unsigned byteCodeBytes) : m_byteCodeBytes(byteCodeBytes) , m_totalCycles(0) , m_parentPhaseEndSlop(0) , m_timerFailure(false) #if MEASURE_CLRAPI_CALLS , m_allClrAPIcalls(0) , m_allClrAPIcycles(0) #endif { for (int i = 0; i < PHASE_NUMBER_OF; i++) { m_invokesByPhase[i] = 0; m_cyclesByPhase[i] = 0; #if MEASURE_CLRAPI_CALLS m_CLRinvokesByPhase[i] = 0; m_CLRcyclesByPhase[i] = 0; #endif } #if MEASURE_CLRAPI_CALLS assert(ArrLen(m_perClrAPIcalls) == API_ICorJitInfo_Names::API_COUNT); assert(ArrLen(m_perClrAPIcycles) == API_ICorJitInfo_Names::API_COUNT); assert(ArrLen(m_maxClrAPIcycles) == API_ICorJitInfo_Names::API_COUNT); for (int i = 0; i < API_ICorJitInfo_Names::API_COUNT; i++) { m_perClrAPIcalls[i] = 0; m_perClrAPIcycles[i] = 0; m_maxClrAPIcycles[i] = 0; } #endif } bool CompTimeSummaryInfo::IncludedInFilteredData(CompTimeInfo& info) { return false; // info.m_byteCodeBytes < 10; } //------------------------------------------------------------------------ // CompTimeSummaryInfo::AddInfo: Record timing info from one compile. // // Arguments: // info - The timing information to record. // includePhases - If "true", the per-phase info in "info" is valid, // which means that a "normal" compile has ended; if // the value is "false" we are recording the results // of a partial compile (typically an import-only run // on behalf of the inliner) in which case the phase // info is not valid and so we only record EE call // overhead. void CompTimeSummaryInfo::AddInfo(CompTimeInfo& info, bool includePhases) { if (info.m_timerFailure) { return; // Don't update if there was a failure. } CritSecHolder timeLock(s_compTimeSummaryLock); if (includePhases) { bool includeInFiltered = IncludedInFilteredData(info); m_numMethods++; // Update the totals and maxima. m_total.m_byteCodeBytes += info.m_byteCodeBytes; m_maximum.m_byteCodeBytes = max(m_maximum.m_byteCodeBytes, info.m_byteCodeBytes); m_total.m_totalCycles += info.m_totalCycles; m_maximum.m_totalCycles = max(m_maximum.m_totalCycles, info.m_totalCycles); #if MEASURE_CLRAPI_CALLS // Update the CLR-API values. 
m_total.m_allClrAPIcalls += info.m_allClrAPIcalls; m_maximum.m_allClrAPIcalls = max(m_maximum.m_allClrAPIcalls, info.m_allClrAPIcalls); m_total.m_allClrAPIcycles += info.m_allClrAPIcycles; m_maximum.m_allClrAPIcycles = max(m_maximum.m_allClrAPIcycles, info.m_allClrAPIcycles); #endif if (includeInFiltered) { m_numFilteredMethods++; m_filtered.m_byteCodeBytes += info.m_byteCodeBytes; m_filtered.m_totalCycles += info.m_totalCycles; m_filtered.m_parentPhaseEndSlop += info.m_parentPhaseEndSlop; } for (int i = 0; i < PHASE_NUMBER_OF; i++) { m_total.m_invokesByPhase[i] += info.m_invokesByPhase[i]; m_total.m_cyclesByPhase[i] += info.m_cyclesByPhase[i]; #if MEASURE_CLRAPI_CALLS m_total.m_CLRinvokesByPhase[i] += info.m_CLRinvokesByPhase[i]; m_total.m_CLRcyclesByPhase[i] += info.m_CLRcyclesByPhase[i]; #endif if (includeInFiltered) { m_filtered.m_invokesByPhase[i] += info.m_invokesByPhase[i]; m_filtered.m_cyclesByPhase[i] += info.m_cyclesByPhase[i]; #if MEASURE_CLRAPI_CALLS m_filtered.m_CLRinvokesByPhase[i] += info.m_CLRinvokesByPhase[i]; m_filtered.m_CLRcyclesByPhase[i] += info.m_CLRcyclesByPhase[i]; #endif } m_maximum.m_cyclesByPhase[i] = max(m_maximum.m_cyclesByPhase[i], info.m_cyclesByPhase[i]); #if MEASURE_CLRAPI_CALLS m_maximum.m_CLRcyclesByPhase[i] = max(m_maximum.m_CLRcyclesByPhase[i], info.m_CLRcyclesByPhase[i]); #endif } m_total.m_parentPhaseEndSlop += info.m_parentPhaseEndSlop; m_maximum.m_parentPhaseEndSlop = max(m_maximum.m_parentPhaseEndSlop, info.m_parentPhaseEndSlop); } #if MEASURE_CLRAPI_CALLS else { m_totMethods++; // Update the "global" CLR-API values. m_total.m_allClrAPIcalls += info.m_allClrAPIcalls; m_maximum.m_allClrAPIcalls = max(m_maximum.m_allClrAPIcalls, info.m_allClrAPIcalls); m_total.m_allClrAPIcycles += info.m_allClrAPIcycles; m_maximum.m_allClrAPIcycles = max(m_maximum.m_allClrAPIcycles, info.m_allClrAPIcycles); // Update the per-phase CLR-API values. m_total.m_invokesByPhase[PHASE_CLR_API] += info.m_allClrAPIcalls; m_maximum.m_invokesByPhase[PHASE_CLR_API] = max(m_maximum.m_perClrAPIcalls[PHASE_CLR_API], info.m_allClrAPIcalls); m_total.m_cyclesByPhase[PHASE_CLR_API] += info.m_allClrAPIcycles; m_maximum.m_cyclesByPhase[PHASE_CLR_API] = max(m_maximum.m_cyclesByPhase[PHASE_CLR_API], info.m_allClrAPIcycles); } for (int i = 0; i < API_ICorJitInfo_Names::API_COUNT; i++) { m_total.m_perClrAPIcalls[i] += info.m_perClrAPIcalls[i]; m_maximum.m_perClrAPIcalls[i] = max(m_maximum.m_perClrAPIcalls[i], info.m_perClrAPIcalls[i]); m_total.m_perClrAPIcycles[i] += info.m_perClrAPIcycles[i]; m_maximum.m_perClrAPIcycles[i] = max(m_maximum.m_perClrAPIcycles[i], info.m_perClrAPIcycles[i]); m_maximum.m_maxClrAPIcycles[i] = max(m_maximum.m_maxClrAPIcycles[i], info.m_maxClrAPIcycles[i]); } #endif } // Static LPCWSTR Compiler::compJitTimeLogFilename = nullptr; void CompTimeSummaryInfo::Print(FILE* f) { if (f == nullptr) { return; } // Otherwise... 
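// CachedCyclesPerSecond() supplies the factor for converting the recorded cycle counts into wall-clock time; the report below shows each figure both in Mcycles and in milliseconds.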
double countsPerSec = CachedCyclesPerSecond(); if (countsPerSec == 0.0) { fprintf(f, "Processor does not have a high-frequency timer.\n"); return; } double totTime_ms = 0.0; fprintf(f, "JIT Compilation time report:\n"); fprintf(f, " Compiled %d methods.\n", m_numMethods); if (m_numMethods != 0) { fprintf(f, " Compiled %d bytecodes total (%d max, %8.2f avg).\n", m_total.m_byteCodeBytes, m_maximum.m_byteCodeBytes, (double)m_total.m_byteCodeBytes / (double)m_numMethods); totTime_ms = ((double)m_total.m_totalCycles / countsPerSec) * 1000.0; fprintf(f, " Time: total: %10.3f Mcycles/%10.3f ms\n", ((double)m_total.m_totalCycles / 1000000.0), totTime_ms); fprintf(f, " max: %10.3f Mcycles/%10.3f ms\n", ((double)m_maximum.m_totalCycles) / 1000000.0, ((double)m_maximum.m_totalCycles / countsPerSec) * 1000.0); fprintf(f, " avg: %10.3f Mcycles/%10.3f ms\n", ((double)m_total.m_totalCycles) / 1000000.0 / (double)m_numMethods, totTime_ms / (double)m_numMethods); const char* extraHdr1 = ""; const char* extraHdr2 = ""; #if MEASURE_CLRAPI_CALLS bool extraInfo = (JitConfig.JitEECallTimingInfo() != 0); if (extraInfo) { extraHdr1 = " CLRs/meth % in CLR"; extraHdr2 = "-----------------------"; } #endif fprintf(f, "\n Total time by phases:\n"); fprintf(f, " PHASE inv/meth Mcycles time (ms) %% of total max (ms)%s\n", extraHdr1); fprintf(f, " ---------------------------------------------------------------------------------------%s\n", extraHdr2); // Ensure that at least the names array and the Phases enum have the same number of entries: assert(ArrLen(PhaseNames) == PHASE_NUMBER_OF); for (int i = 0; i < PHASE_NUMBER_OF; i++) { double phase_tot_ms = (((double)m_total.m_cyclesByPhase[i]) / countsPerSec) * 1000.0; double phase_max_ms = (((double)m_maximum.m_cyclesByPhase[i]) / countsPerSec) * 1000.0; #if MEASURE_CLRAPI_CALLS // Skip showing CLR API call info if we didn't collect any if (i == PHASE_CLR_API && !extraInfo) continue; #endif // Indent nested phases, according to depth. 
int ancPhase = PhaseParent[i]; while (ancPhase != -1) { fprintf(f, " "); ancPhase = PhaseParent[ancPhase]; } fprintf(f, " %-30s %6.2f %10.2f %9.3f %8.2f%% %8.3f", PhaseNames[i], ((double)m_total.m_invokesByPhase[i]) / ((double)m_numMethods), ((double)m_total.m_cyclesByPhase[i]) / 1000000.0, phase_tot_ms, (phase_tot_ms * 100.0 / totTime_ms), phase_max_ms); #if MEASURE_CLRAPI_CALLS if (extraInfo && i != PHASE_CLR_API) { double nest_tot_ms = (((double)m_total.m_CLRcyclesByPhase[i]) / countsPerSec) * 1000.0; double nest_percent = nest_tot_ms * 100.0 / totTime_ms; double calls_per_fn = ((double)m_total.m_CLRinvokesByPhase[i]) / ((double)m_numMethods); if (nest_percent > 0.1 || calls_per_fn > 10) fprintf(f, " %5.1f %8.2f%%", calls_per_fn, nest_percent); } #endif fprintf(f, "\n"); } // Show slop if it's over a certain percentage of the total double pslop_pct = 100.0 * m_total.m_parentPhaseEndSlop * 1000.0 / countsPerSec / totTime_ms; if (pslop_pct >= 1.0) { fprintf(f, "\n 'End phase slop' should be very small (if not, there's unattributed time): %9.3f Mcycles = " "%3.1f%% of total.\n\n", m_total.m_parentPhaseEndSlop / 1000000.0, pslop_pct); } } if (m_numFilteredMethods > 0) { fprintf(f, " Compiled %d methods that meet the filter requirement.\n", m_numFilteredMethods); fprintf(f, " Compiled %d bytecodes total (%8.2f avg).\n", m_filtered.m_byteCodeBytes, (double)m_filtered.m_byteCodeBytes / (double)m_numFilteredMethods); double totTime_ms = ((double)m_filtered.m_totalCycles / countsPerSec) * 1000.0; fprintf(f, " Time: total: %10.3f Mcycles/%10.3f ms\n", ((double)m_filtered.m_totalCycles / 1000000.0), totTime_ms); fprintf(f, " avg: %10.3f Mcycles/%10.3f ms\n", ((double)m_filtered.m_totalCycles) / 1000000.0 / (double)m_numFilteredMethods, totTime_ms / (double)m_numFilteredMethods); fprintf(f, " Total time by phases:\n"); fprintf(f, " PHASE inv/meth Mcycles time (ms) %% of total\n"); fprintf(f, " --------------------------------------------------------------------------------------\n"); // Ensure that at least the names array and the Phases enum have the same number of entries: assert(ArrLen(PhaseNames) == PHASE_NUMBER_OF); for (int i = 0; i < PHASE_NUMBER_OF; i++) { double phase_tot_ms = (((double)m_filtered.m_cyclesByPhase[i]) / countsPerSec) * 1000.0; // Indent nested phases, according to depth. 
int ancPhase = PhaseParent[i]; while (ancPhase != -1) { fprintf(f, " "); ancPhase = PhaseParent[ancPhase]; } fprintf(f, " %-30s %5.2f %10.2f %9.3f %8.2f%%\n", PhaseNames[i], ((double)m_filtered.m_invokesByPhase[i]) / ((double)m_numFilteredMethods), ((double)m_filtered.m_cyclesByPhase[i]) / 1000000.0, phase_tot_ms, (phase_tot_ms * 100.0 / totTime_ms)); } double fslop_ms = m_filtered.m_parentPhaseEndSlop * 1000.0 / countsPerSec; if (fslop_ms > 1.0) { fprintf(f, "\n 'End phase slop' should be very small (if not, there's unattributed time): %9.3f Mcycles = " "%3.1f%% of total.\n\n", m_filtered.m_parentPhaseEndSlop / 1000000.0, fslop_ms); } } #if MEASURE_CLRAPI_CALLS if (m_total.m_allClrAPIcalls > 0 && m_total.m_allClrAPIcycles > 0) { fprintf(f, "\n"); if (m_totMethods > 0) fprintf(f, " Imported %u methods.\n\n", m_numMethods + m_totMethods); fprintf(f, " CLR API # calls total time max time avg time %% " "of total\n"); fprintf(f, " -------------------------------------------------------------------------------"); fprintf(f, "---------------------\n"); static const char* APInames[] = { #define DEF_CLR_API(name) #name, #include "ICorJitInfo_API_names.h" }; unsigned shownCalls = 0; double shownMillis = 0.0; #ifdef DEBUG unsigned checkedCalls = 0; double checkedMillis = 0.0; #endif for (unsigned pass = 0; pass < 2; pass++) { for (unsigned i = 0; i < API_ICorJitInfo_Names::API_COUNT; i++) { unsigned calls = m_total.m_perClrAPIcalls[i]; if (calls == 0) continue; unsigned __int64 cycles = m_total.m_perClrAPIcycles[i]; double millis = 1000.0 * cycles / countsPerSec; // Don't show the small fry to keep the results manageable if (millis < 0.5) { // We always show the following API because it is always called // exactly once for each method and its body is the simplest one // possible (it just returns an integer constant), and therefore // it can be used to measure the overhead of adding the CLR API // timing code. Roughly speaking, on a 3GHz x64 box the overhead // per call should be around 40 ns when using RDTSC, compared to // about 140 ns when using GetThreadCycles() under Windows. if (i != API_ICorJitInfo_Names::API_getExpectedTargetArchitecture) continue; } // In the first pass we just compute the totals. 
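// (the second pass prints one line per API, using these totals for the "% of total" column)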
if (pass == 0) { shownCalls += m_total.m_perClrAPIcalls[i]; shownMillis += millis; continue; } unsigned __int32 maxcyc = m_maximum.m_maxClrAPIcycles[i]; double max_ms = 1000.0 * maxcyc / countsPerSec; fprintf(f, " %-40s", APInames[i]); // API name fprintf(f, " %8u %9.1f ms", calls, millis); // #calls, total time fprintf(f, " %8.1f ms %8.1f ns", max_ms, 1000000.0 * millis / calls); // max, avg time fprintf(f, " %5.1f%%\n", 100.0 * millis / shownMillis); // % of total #ifdef DEBUG checkedCalls += m_total.m_perClrAPIcalls[i]; checkedMillis += millis; #endif } } #ifdef DEBUG assert(checkedCalls == shownCalls); assert(checkedMillis == shownMillis); #endif if (shownCalls > 0 || shownMillis > 0) { fprintf(f, " -------------------------"); fprintf(f, "---------------------------------------------------------------------------\n"); fprintf(f, " Total for calls shown above %8u %10.1f ms", shownCalls, shownMillis); if (totTime_ms > 0.0) fprintf(f, " (%4.1lf%% of overall JIT time)", shownMillis * 100.0 / totTime_ms); fprintf(f, "\n"); } fprintf(f, "\n"); } #endif fprintf(f, "\n"); } JitTimer::JitTimer(unsigned byteCodeSize) : m_info(byteCodeSize) { #if MEASURE_CLRAPI_CALLS m_CLRcallInvokes = 0; m_CLRcallCycles = 0; #endif #ifdef DEBUG m_lastPhase = (Phases)-1; #if MEASURE_CLRAPI_CALLS m_CLRcallAPInum = -1; #endif #endif unsigned __int64 threadCurCycles; if (_our_GetThreadCycles(&threadCurCycles)) { m_start = threadCurCycles; m_curPhaseStart = threadCurCycles; } } void JitTimer::EndPhase(Compiler* compiler, Phases phase) { // Otherwise... // We re-run some phases currently, so this following assert doesn't work. // assert((int)phase > (int)m_lastPhase); // We should end phases in increasing order. unsigned __int64 threadCurCycles; if (_our_GetThreadCycles(&threadCurCycles)) { unsigned __int64 phaseCycles = (threadCurCycles - m_curPhaseStart); // If this is not a leaf phase, the assumption is that the last subphase must have just recently ended. // Credit the duration to "slop", the total of which should be very small. if (PhaseHasChildren[phase]) { m_info.m_parentPhaseEndSlop += phaseCycles; } else { // It is a leaf phase. Credit duration to it. m_info.m_invokesByPhase[phase]++; m_info.m_cyclesByPhase[phase] += phaseCycles; #if MEASURE_CLRAPI_CALLS // Record the CLR API timing info as well. m_info.m_CLRinvokesByPhase[phase] += m_CLRcallInvokes; m_info.m_CLRcyclesByPhase[phase] += m_CLRcallCycles; #endif // Credit the phase's ancestors, if any. int ancPhase = PhaseParent[phase]; while (ancPhase != -1) { m_info.m_cyclesByPhase[ancPhase] += phaseCycles; ancPhase = PhaseParent[ancPhase]; } #if MEASURE_CLRAPI_CALLS const Phases lastPhase = PHASE_CLR_API; #else const Phases lastPhase = PHASE_NUMBER_OF; #endif if (phase + 1 == lastPhase) { m_info.m_totalCycles = (threadCurCycles - m_start); } else { m_curPhaseStart = threadCurCycles; } } if ((JitConfig.JitMeasureIR() != 0) && PhaseReportsIRSize[phase]) { m_info.m_nodeCountAfterPhase[phase] = compiler->fgMeasureIR(); } else { m_info.m_nodeCountAfterPhase[phase] = 0; } } #ifdef DEBUG m_lastPhase = phase; #endif #if MEASURE_CLRAPI_CALLS m_CLRcallInvokes = 0; m_CLRcallCycles = 0; #endif } #if MEASURE_CLRAPI_CALLS //------------------------------------------------------------------------ // JitTimer::CLRApiCallEnter: Start the stopwatch for an EE call. // // Arguments: // apix - The API index - an "enum API_ICorJitInfo_Names" value. 
// void JitTimer::CLRApiCallEnter(unsigned apix) { assert(m_CLRcallAPInum == -1); // Nested calls not allowed m_CLRcallAPInum = apix; // If we can't get the cycles, we'll just ignore this call if (!_our_GetThreadCycles(&m_CLRcallStart)) m_CLRcallStart = 0; } //------------------------------------------------------------------------ // JitTimer::CLRApiCallLeave: compute / record time spent in an EE call. // // Arguments: // apix - The API's "enum API_ICorJitInfo_Names" value; this value // should match the value passed to the most recent call to // "CLRApiCallEnter" (i.e. these must come as matched pairs), // and they also may not nest. // void JitTimer::CLRApiCallLeave(unsigned apix) { // Make sure we're actually inside a measured CLR call. assert(m_CLRcallAPInum != -1); // Ignore this one if we don't have a valid starting counter. if (m_CLRcallStart != 0) { if (JitConfig.JitEECallTimingInfo() != 0) { unsigned __int64 threadCurCycles; if (_our_GetThreadCycles(&threadCurCycles)) { // Compute the cycles spent in the call. threadCurCycles -= m_CLRcallStart; // Add the cycles to the 'phase' and bump its use count. m_info.m_cyclesByPhase[PHASE_CLR_API] += threadCurCycles; m_info.m_invokesByPhase[PHASE_CLR_API] += 1; // Add the values to the "per API" info. m_info.m_allClrAPIcycles += threadCurCycles; m_info.m_allClrAPIcalls += 1; m_info.m_perClrAPIcalls[apix] += 1; m_info.m_perClrAPIcycles[apix] += threadCurCycles; m_info.m_maxClrAPIcycles[apix] = max(m_info.m_maxClrAPIcycles[apix], (unsigned __int32)threadCurCycles); // Subtract the cycles from the enclosing phase by bumping its start time m_curPhaseStart += threadCurCycles; // Update the running totals. m_CLRcallInvokes += 1; m_CLRcallCycles += threadCurCycles; } } m_CLRcallStart = 0; } assert(m_CLRcallAPInum != -1); // No longer in this API call. m_CLRcallAPInum = -1; } #endif // MEASURE_CLRAPI_CALLS CritSecObject JitTimer::s_csvLock; // It's expensive to constantly open and close the file, so open it once and close it // when the process exits. This should be accessed under the s_csvLock. FILE* JitTimer::s_csvFile = nullptr; LPCWSTR Compiler::JitTimeLogCsv() { LPCWSTR jitTimeLogCsv = JitConfig.JitTimeLogCsv(); return jitTimeLogCsv; } void JitTimer::PrintCsvHeader() { LPCWSTR jitTimeLogCsv = Compiler::JitTimeLogCsv(); if (jitTimeLogCsv == nullptr) { return; } CritSecHolder csvLock(s_csvLock); if (s_csvFile == nullptr) { s_csvFile = _wfopen(jitTimeLogCsv, W("a")); } if (s_csvFile != nullptr) { // Seek to the end of the file s.t. 
`ftell` doesn't lie to us on Windows fseek(s_csvFile, 0, SEEK_END); // Write the header if the file is empty if (ftell(s_csvFile) == 0) { fprintf(s_csvFile, "\"Method Name\","); fprintf(s_csvFile, "\"Assembly or SPMI Index\","); fprintf(s_csvFile, "\"IL Bytes\","); fprintf(s_csvFile, "\"Basic Blocks\","); fprintf(s_csvFile, "\"Min Opts\","); fprintf(s_csvFile, "\"Loops\","); fprintf(s_csvFile, "\"Loops Cloned\","); #if FEATURE_LOOP_ALIGN #ifdef DEBUG fprintf(s_csvFile, "\"Alignment Candidates\","); fprintf(s_csvFile, "\"Loops Aligned\","); #endif // DEBUG #endif // FEATURE_LOOP_ALIGN for (int i = 0; i < PHASE_NUMBER_OF; i++) { fprintf(s_csvFile, "\"%s\",", PhaseNames[i]); if ((JitConfig.JitMeasureIR() != 0) && PhaseReportsIRSize[i]) { fprintf(s_csvFile, "\"Node Count After %s\",", PhaseNames[i]); } } InlineStrategy::DumpCsvHeader(s_csvFile); fprintf(s_csvFile, "\"Executable Code Bytes\","); fprintf(s_csvFile, "\"GC Info Bytes\","); fprintf(s_csvFile, "\"Total Bytes Allocated\","); fprintf(s_csvFile, "\"Total Cycles\","); fprintf(s_csvFile, "\"CPS\"\n"); fflush(s_csvFile); } } } void JitTimer::PrintCsvMethodStats(Compiler* comp) { LPCWSTR jitTimeLogCsv = Compiler::JitTimeLogCsv(); if (jitTimeLogCsv == nullptr) { return; } // eeGetMethodFullName uses locks, so don't enter crit sec before this call. #if defined(DEBUG) || defined(LATE_DISASM) // If we already have computed the name because for some reason we're generating the CSV // for a DEBUG build (presumably not for the time info), just re-use it. const char* methName = comp->info.compFullName; #else const char* methName = comp->eeGetMethodFullName(comp->info.compMethodHnd); #endif // Try and access the SPMI index to report in the data set. // // If the jit is not hosted under SPMI this will return the // default value of zero. // // Query the jit host directly here instead of going via the // config cache, since value will change for each method. 
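// When the jit is hosted under SuperPMI, the index identifies the method context being replayed and is written to the "Assembly or SPMI Index" column in place of the assembly name.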
int index = g_jitHost->getIntConfigValue(W("SuperPMIMethodContextNumber"), 0); CritSecHolder csvLock(s_csvLock); if (s_csvFile == nullptr) { return; } fprintf(s_csvFile, "\"%s\",", methName); if (index != 0) { fprintf(s_csvFile, "%d,", index); } else { const char* methodAssemblyName = comp->info.compCompHnd->getAssemblyName( comp->info.compCompHnd->getModuleAssembly(comp->info.compCompHnd->getClassModule(comp->info.compClassHnd))); fprintf(s_csvFile, "\"%s\",", methodAssemblyName); } fprintf(s_csvFile, "%u,", comp->info.compILCodeSize); fprintf(s_csvFile, "%u,", comp->fgBBcount); fprintf(s_csvFile, "%u,", comp->opts.MinOpts()); fprintf(s_csvFile, "%u,", comp->optLoopCount); fprintf(s_csvFile, "%u,", comp->optLoopsCloned); #if FEATURE_LOOP_ALIGN #ifdef DEBUG fprintf(s_csvFile, "%u,", comp->loopAlignCandidates); fprintf(s_csvFile, "%u,", comp->loopsAligned); #endif // DEBUG #endif // FEATURE_LOOP_ALIGN unsigned __int64 totCycles = 0; for (int i = 0; i < PHASE_NUMBER_OF; i++) { if (!PhaseHasChildren[i]) { totCycles += m_info.m_cyclesByPhase[i]; } fprintf(s_csvFile, "%I64u,", m_info.m_cyclesByPhase[i]); if ((JitConfig.JitMeasureIR() != 0) && PhaseReportsIRSize[i]) { fprintf(s_csvFile, "%u,", m_info.m_nodeCountAfterPhase[i]); } } comp->m_inlineStrategy->DumpCsvData(s_csvFile); fprintf(s_csvFile, "%u,", comp->info.compNativeCodeSize); fprintf(s_csvFile, "%Iu,", comp->compInfoBlkSize); fprintf(s_csvFile, "%Iu,", comp->compGetArenaAllocator()->getTotalBytesAllocated()); fprintf(s_csvFile, "%I64u,", m_info.m_totalCycles); fprintf(s_csvFile, "%f\n", CachedCyclesPerSecond()); fflush(s_csvFile); } // Perform process shutdown actions. // // static void JitTimer::Shutdown() { CritSecHolder csvLock(s_csvLock); if (s_csvFile != nullptr) { fclose(s_csvFile); } } // Completes the timing of the current method, and adds it to "sum". void JitTimer::Terminate(Compiler* comp, CompTimeSummaryInfo& sum, bool includePhases) { if (includePhases) { PrintCsvMethodStats(comp); } sum.AddInfo(m_info, includePhases); } #endif // FEATURE_JIT_METHOD_PERF #if LOOP_HOIST_STATS // Static fields. CritSecObject Compiler::s_loopHoistStatsLock; // Default constructor. unsigned Compiler::s_loopsConsidered = 0; unsigned Compiler::s_loopsWithHoistedExpressions = 0; unsigned Compiler::s_totalHoistedExpressions = 0; // static void Compiler::PrintAggregateLoopHoistStats(FILE* f) { fprintf(f, "\n"); fprintf(f, "---------------------------------------------------\n"); fprintf(f, "Loop hoisting stats\n"); fprintf(f, "---------------------------------------------------\n"); double pctWithHoisted = 0.0; if (s_loopsConsidered > 0) { pctWithHoisted = 100.0 * (double(s_loopsWithHoistedExpressions) / double(s_loopsConsidered)); } double exprsPerLoopWithExpr = 0.0; if (s_loopsWithHoistedExpressions > 0) { exprsPerLoopWithExpr = double(s_totalHoistedExpressions) / double(s_loopsWithHoistedExpressions); } fprintf(f, "Considered %d loops. 
Of these, we hoisted expressions out of %d (%6.2f%%).\n", s_loopsConsidered, s_loopsWithHoistedExpressions, pctWithHoisted); fprintf(f, " A total of %d expressions were hoisted, an average of %5.2f per loop-with-hoisted-expr.\n", s_totalHoistedExpressions, exprsPerLoopWithExpr); } void Compiler::AddLoopHoistStats() { CritSecHolder statsLock(s_loopHoistStatsLock); s_loopsConsidered += m_loopsConsidered; s_loopsWithHoistedExpressions += m_loopsWithHoistedExpressions; s_totalHoistedExpressions += m_totalHoistedExpressions; } void Compiler::PrintPerMethodLoopHoistStats() { double pctWithHoisted = 0.0; if (m_loopsConsidered > 0) { pctWithHoisted = 100.0 * (double(m_loopsWithHoistedExpressions) / double(m_loopsConsidered)); } double exprsPerLoopWithExpr = 0.0; if (m_loopsWithHoistedExpressions > 0) { exprsPerLoopWithExpr = double(m_totalHoistedExpressions) / double(m_loopsWithHoistedExpressions); } printf("Considered %d loops. Of these, we hoisted expressions out of %d (%5.2f%%).\n", m_loopsConsidered, m_loopsWithHoistedExpressions, pctWithHoisted); printf(" A total of %d expressions were hoisted, an average of %5.2f per loop-with-hoisted-expr.\n", m_totalHoistedExpressions, exprsPerLoopWithExpr); } #endif // LOOP_HOIST_STATS //------------------------------------------------------------------------ // RecordStateAtEndOfInlining: capture timing data (if enabled) after // inlining as completed. // // Note: // Records data needed for SQM and inlining data dumps. Should be // called after inlining is complete. (We do this after inlining // because this marks the last point at which the JIT is likely to // cause type-loading and class initialization). void Compiler::RecordStateAtEndOfInlining() { #if defined(DEBUG) || defined(INLINE_DATA) m_compCyclesAtEndOfInlining = 0; m_compTickCountAtEndOfInlining = 0; bool b = CycleTimer::GetThreadCyclesS(&m_compCyclesAtEndOfInlining); if (!b) { return; // We don't have a thread cycle counter. } m_compTickCountAtEndOfInlining = GetTickCount(); #endif // defined(DEBUG) || defined(INLINE_DATA) } //------------------------------------------------------------------------ // RecordStateAtEndOfCompilation: capture timing data (if enabled) after // compilation is completed. void Compiler::RecordStateAtEndOfCompilation() { #if defined(DEBUG) || defined(INLINE_DATA) // Common portion m_compCycles = 0; unsigned __int64 compCyclesAtEnd; bool b = CycleTimer::GetThreadCyclesS(&compCyclesAtEnd); if (!b) { return; // We don't have a thread cycle counter. } assert(compCyclesAtEnd >= m_compCyclesAtEndOfInlining); m_compCycles = compCyclesAtEnd - m_compCyclesAtEndOfInlining; #endif // defined(DEBUG) || defined(INLINE_DATA) } #if FUNC_INFO_LOGGING // static LPCWSTR Compiler::compJitFuncInfoFilename = nullptr; // static FILE* Compiler::compJitFuncInfoFile = nullptr; #endif // FUNC_INFO_LOGGING #ifdef DEBUG // dumpConvertedVarSet() dumps the varset bits that are tracked // variable indices, and we convert them to variable numbers, sort the variable numbers, and // print them as variable numbers. To do this, we use a temporary set indexed by // variable number. We can't use the "all varset" type because it is still size-limited, and might // not be big enough to handle all possible variable numbers. void dumpConvertedVarSet(Compiler* comp, VARSET_VALARG_TP vars) { BYTE* pVarNumSet; // trivial set: one byte per varNum, 0 means not in set, 1 means in set. 
size_t varNumSetBytes = comp->lvaCount * sizeof(BYTE); pVarNumSet = (BYTE*)_alloca(varNumSetBytes); memset(pVarNumSet, 0, varNumSetBytes); // empty the set VarSetOps::Iter iter(comp, vars); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { unsigned varNum = comp->lvaTrackedIndexToLclNum(varIndex); pVarNumSet[varNum] = 1; // This varNum is in the set } bool first = true; printf("{"); for (size_t varNum = 0; varNum < comp->lvaCount; varNum++) { if (pVarNumSet[varNum] == 1) { if (!first) { printf(" "); } printf("V%02u", varNum); first = false; } } printf("}"); } /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Debugging helpers XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ /* The following functions are intended to be called from the debugger, to dump * various data structures. * * The versions that start with 'c' take a Compiler* as the first argument. * The versions that start with 'd' use the tlsCompiler, so don't require a Compiler*. * * Summary: * cBlock, dBlock : Display a basic block (call fgTableDispBasicBlock()). * cBlocks, dBlocks : Display all the basic blocks of a function (call fgDispBasicBlocks()). * cBlocksV, dBlocksV : Display all the basic blocks of a function (call fgDispBasicBlocks(true)). * "V" means "verbose", and will dump all the trees. * cStmt, dStmt : Display a Statement (call gtDispStmt()). * cTree, dTree : Display a tree (call gtDispTree()). * cTreeLIR, dTreeLIR : Display a tree in LIR form (call gtDispLIRNode()). * cTrees, dTrees : Display all the trees in a function (call fgDumpTrees()). * cEH, dEH : Display the EH handler table (call fgDispHandlerTab()). * cVar, dVar : Display a local variable given its number (call lvaDumpEntry()). * cVarDsc, dVarDsc : Display a local variable given a LclVarDsc* (call lvaDumpEntry()). * cVars, dVars : Display the local variable table (call lvaTableDump()). * cVarsFinal, dVarsFinal : Display the local variable table (call lvaTableDump(FINAL_FRAME_LAYOUT)). * cBlockCheapPreds, dBlockCheapPreds : Display a block's cheap predecessors (call block->dspCheapPreds()). * cBlockPreds, dBlockPreds : Display a block's predecessors (call block->dspPreds()). * cBlockSuccs, dBlockSuccs : Display a block's successors (call block->dspSuccs(compiler)). * cReach, dReach : Display all block reachability (call fgDispReach()). * cDoms, dDoms : Display all block dominators (call fgDispDoms()). * cLiveness, dLiveness : Display per-block variable liveness (call fgDispBBLiveness()). * cCVarSet, dCVarSet : Display a "converted" VARSET_TP: the varset is assumed to be tracked variable * indices. These are converted to variable numbers and sorted. (Calls * dumpConvertedVarSet()). * cLoop, dLoop : Display the blocks of a loop, including the trees. * cTreeFlags, dTreeFlags : Display tree flags * * The following don't require a Compiler* to work: * dRegMask : Display a regMaskTP (call dspRegMask(mask)). * dBlockList : Display a BasicBlockList*. 
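 * Example usage from a debugger command window (the tree id and variable number below are
 * hypothetical; substitute values from your own dump):
 *      dBlocks()        : dump all the basic blocks of the function
 *      dFindTree(42)    : find the tree whose gtTreeID is 42 (also sets dbTree/dbTreeBlock)
 *      dVar(3)          : dump local variable V03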
*/ void cBlock(Compiler* comp, BasicBlock* block) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Block %u\n", sequenceNumber++); comp->fgTableDispBasicBlock(block); } void cBlocks(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Blocks %u\n", sequenceNumber++); comp->fgDispBasicBlocks(); } void cBlocksV(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *BlocksV %u\n", sequenceNumber++); comp->fgDispBasicBlocks(true); } void cStmt(Compiler* comp, Statement* statement) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Stmt %u\n", sequenceNumber++); comp->gtDispStmt(statement, ">>>"); } void cTree(Compiler* comp, GenTree* tree) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Tree %u\n", sequenceNumber++); comp->gtDispTree(tree, nullptr, ">>>"); } void cTreeLIR(Compiler* comp, GenTree* tree) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *TreeLIR %u\n", sequenceNumber++); comp->gtDispLIRNode(tree); } void cTrees(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Trees %u\n", sequenceNumber++); comp->fgDumpTrees(comp->fgFirstBB, nullptr); } void cEH(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *EH %u\n", sequenceNumber++); comp->fgDispHandlerTab(); } void cVar(Compiler* comp, unsigned lclNum) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Var %u\n", sequenceNumber++); comp->lvaDumpEntry(lclNum, Compiler::FINAL_FRAME_LAYOUT); } void cVarDsc(Compiler* comp, LclVarDsc* varDsc) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *VarDsc %u\n", sequenceNumber++); unsigned lclNum = comp->lvaGetLclNum(varDsc); comp->lvaDumpEntry(lclNum, Compiler::FINAL_FRAME_LAYOUT); } void cVars(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Vars %u\n", sequenceNumber++); comp->lvaTableDump(); } void cVarsFinal(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called 
printf("===================================================================== *Vars %u\n", sequenceNumber++); comp->lvaTableDump(Compiler::FINAL_FRAME_LAYOUT); } void cBlockCheapPreds(Compiler* comp, BasicBlock* block) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *BlockCheapPreds %u\n", sequenceNumber++); block->dspCheapPreds(); } void cBlockPreds(Compiler* comp, BasicBlock* block) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *BlockPreds %u\n", sequenceNumber++); block->dspPreds(); } void cBlockSuccs(Compiler* comp, BasicBlock* block) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *BlockSuccs %u\n", sequenceNumber++); block->dspSuccs(comp); } void cReach(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Reach %u\n", sequenceNumber++); comp->fgDispReach(); } void cDoms(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Doms %u\n", sequenceNumber++); comp->fgDispDoms(); } void cLiveness(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Liveness %u\n", sequenceNumber++); comp->fgDispBBLiveness(); } void cCVarSet(Compiler* comp, VARSET_VALARG_TP vars) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *CVarSet %u\n", sequenceNumber++); dumpConvertedVarSet(comp, vars); printf("\n"); // dumpConvertedVarSet() doesn't emit a trailing newline } void cLoop(Compiler* comp, unsigned loopNum) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Loop %u\n", sequenceNumber++); comp->optPrintLoopInfo(loopNum, /* verbose */ true); printf("\n"); } void cLoopPtr(Compiler* comp, const Compiler::LoopDsc* loop) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *LoopPtr %u\n", sequenceNumber++); comp->optPrintLoopInfo(loop, /* verbose */ true); printf("\n"); } void cLoops(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Loops %u\n", sequenceNumber++); comp->optPrintLoopTable(); } void dBlock(BasicBlock* block) { cBlock(JitTls::GetCompiler(), block); } void dBlocks() { cBlocks(JitTls::GetCompiler()); } void dBlocksV() { cBlocksV(JitTls::GetCompiler()); } void dStmt(Statement* statement) { cStmt(JitTls::GetCompiler(), statement); } void dTree(GenTree* tree) { 
cTree(JitTls::GetCompiler(), tree); } void dTreeLIR(GenTree* tree) { cTreeLIR(JitTls::GetCompiler(), tree); } void dTreeRange(GenTree* first, GenTree* last) { Compiler* comp = JitTls::GetCompiler(); GenTree* cur = first; while (true) { cTreeLIR(comp, cur); if (cur == last) break; cur = cur->gtNext; } } void dTrees() { cTrees(JitTls::GetCompiler()); } void dEH() { cEH(JitTls::GetCompiler()); } void dVar(unsigned lclNum) { cVar(JitTls::GetCompiler(), lclNum); } void dVarDsc(LclVarDsc* varDsc) { cVarDsc(JitTls::GetCompiler(), varDsc); } void dVars() { cVars(JitTls::GetCompiler()); } void dVarsFinal() { cVarsFinal(JitTls::GetCompiler()); } void dBlockPreds(BasicBlock* block) { cBlockPreds(JitTls::GetCompiler(), block); } void dBlockCheapPreds(BasicBlock* block) { cBlockCheapPreds(JitTls::GetCompiler(), block); } void dBlockSuccs(BasicBlock* block) { cBlockSuccs(JitTls::GetCompiler(), block); } void dReach() { cReach(JitTls::GetCompiler()); } void dDoms() { cDoms(JitTls::GetCompiler()); } void dLiveness() { cLiveness(JitTls::GetCompiler()); } void dCVarSet(VARSET_VALARG_TP vars) { cCVarSet(JitTls::GetCompiler(), vars); } void dLoop(unsigned loopNum) { cLoop(JitTls::GetCompiler(), loopNum); } void dLoopPtr(const Compiler::LoopDsc* loop) { cLoopPtr(JitTls::GetCompiler(), loop); } void dLoops() { cLoops(JitTls::GetCompiler()); } void dRegMask(regMaskTP mask) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== dRegMask %u\n", sequenceNumber++); dspRegMask(mask); printf("\n"); // dspRegMask() doesn't emit a trailing newline } void dBlockList(BasicBlockList* list) { printf("WorkList: "); while (list != nullptr) { printf(FMT_BB " ", list->block->bbNum); list = list->next; } printf("\n"); } // Global variables available in debug mode. That are set by debug APIs for finding // Trees, Stmts, and/or Blocks using id or bbNum. // That can be used in watch window or as a way to get address of fields for data break points. GenTree* dbTree; Statement* dbStmt; BasicBlock* dbTreeBlock; BasicBlock* dbBlock; // Debug APIs for finding Trees, Stmts, and/or Blocks. // As a side effect, they set the debug variables above. GenTree* dFindTree(GenTree* tree, unsigned id) { if (tree == nullptr) { return nullptr; } if (tree->gtTreeID == id) { dbTree = tree; return tree; } GenTree* child = nullptr; tree->VisitOperands([&child, id](GenTree* operand) -> GenTree::VisitResult { child = dFindTree(child, id); return (child != nullptr) ? 
GenTree::VisitResult::Abort : GenTree::VisitResult::Continue; }); return child; } GenTree* dFindTree(unsigned id) { Compiler* comp = JitTls::GetCompiler(); GenTree* tree; dbTreeBlock = nullptr; dbTree = nullptr; for (BasicBlock* const block : comp->Blocks()) { for (Statement* const stmt : block->Statements()) { tree = dFindTree(stmt->GetRootNode(), id); if (tree != nullptr) { dbTreeBlock = block; return tree; } } } return nullptr; } Statement* dFindStmt(unsigned id) { Compiler* comp = JitTls::GetCompiler(); dbStmt = nullptr; unsigned stmtId = 0; for (BasicBlock* const block : comp->Blocks()) { for (Statement* const stmt : block->Statements()) { stmtId++; if (stmtId == id) { dbStmt = stmt; return stmt; } } } return nullptr; } BasicBlock* dFindBlock(unsigned bbNum) { Compiler* comp = JitTls::GetCompiler(); BasicBlock* block = nullptr; dbBlock = nullptr; for (block = comp->fgFirstBB; block != nullptr; block = block->bbNext) { if (block->bbNum == bbNum) { dbBlock = block; break; } } return block; } Compiler::LoopDsc* dFindLoop(unsigned loopNum) { Compiler* comp = JitTls::GetCompiler(); if (loopNum >= comp->optLoopCount) { printf("loopNum %u out of range\n", loopNum); return nullptr; } return &comp->optLoopTable[loopNum]; } void cTreeFlags(Compiler* comp, GenTree* tree) { int chars = 0; if (tree->gtFlags != 0) { chars += printf("flags="); // Node flags CLANG_FORMAT_COMMENT_ANCHOR; #if defined(DEBUG) if (tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE) { chars += printf("[NODE_LARGE]"); } if (tree->gtDebugFlags & GTF_DEBUG_NODE_SMALL) { chars += printf("[NODE_SMALL]"); } if (tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) { chars += printf("[MORPHED]"); } #endif // defined(DEBUG) if (tree->gtFlags & GTF_COLON_COND) { chars += printf("[COLON_COND]"); } // Operator flags genTreeOps op = tree->OperGet(); switch (op) { case GT_LCL_VAR: case GT_LCL_VAR_ADDR: case GT_LCL_FLD: case GT_LCL_FLD_ADDR: case GT_STORE_LCL_FLD: case GT_STORE_LCL_VAR: if (tree->gtFlags & GTF_VAR_DEF) { chars += printf("[VAR_DEF]"); } if (tree->gtFlags & GTF_VAR_USEASG) { chars += printf("[VAR_USEASG]"); } if (tree->gtFlags & GTF_VAR_CAST) { chars += printf("[VAR_CAST]"); } if (tree->gtFlags & GTF_VAR_ITERATOR) { chars += printf("[VAR_ITERATOR]"); } if (tree->gtFlags & GTF_VAR_CLONED) { chars += printf("[VAR_CLONED]"); } if (tree->gtFlags & GTF_VAR_DEATH) { chars += printf("[VAR_DEATH]"); } if (tree->gtFlags & GTF_VAR_ARR_INDEX) { chars += printf("[VAR_ARR_INDEX]"); } #if defined(DEBUG) if (tree->gtDebugFlags & GTF_DEBUG_VAR_CSE_REF) { chars += printf("[VAR_CSE_REF]"); } #endif break; case GT_NO_OP: break; case GT_FIELD: if (tree->gtFlags & GTF_FLD_VOLATILE) { chars += printf("[FLD_VOLATILE]"); } break; case GT_INDEX: if (tree->gtFlags & GTF_INX_STRING_LAYOUT) { chars += printf("[INX_STRING_LAYOUT]"); } FALLTHROUGH; case GT_INDEX_ADDR: if (tree->gtFlags & GTF_INX_RNGCHK) { chars += printf("[INX_RNGCHK]"); } break; case GT_IND: case GT_STOREIND: if (tree->gtFlags & GTF_IND_VOLATILE) { chars += printf("[IND_VOLATILE]"); } if (tree->gtFlags & GTF_IND_TGTANYWHERE) { chars += printf("[IND_TGTANYWHERE]"); } if (tree->gtFlags & GTF_IND_TGT_NOT_HEAP) { chars += printf("[IND_TGT_NOT_HEAP]"); } if (tree->gtFlags & GTF_IND_TLS_REF) { chars += printf("[IND_TLS_REF]"); } if (tree->gtFlags & GTF_IND_ASG_LHS) { chars += printf("[IND_ASG_LHS]"); } if (tree->gtFlags & GTF_IND_UNALIGNED) { chars += printf("[IND_UNALIGNED]"); } if (tree->gtFlags & GTF_IND_INVARIANT) { chars += printf("[IND_INVARIANT]"); } if (tree->gtFlags & GTF_IND_NONNULL) { chars += 
printf("[IND_NONNULL]"); } break; case GT_CLS_VAR: if (tree->gtFlags & GTF_CLS_VAR_ASG_LHS) { chars += printf("[CLS_VAR_ASG_LHS]"); } break; case GT_MUL: #if !defined(TARGET_64BIT) case GT_MUL_LONG: #endif if (tree->gtFlags & GTF_MUL_64RSLT) { chars += printf("[64RSLT]"); } if (tree->gtFlags & GTF_ADDRMODE_NO_CSE) { chars += printf("[ADDRMODE_NO_CSE]"); } break; case GT_ADD: if (tree->gtFlags & GTF_ADDRMODE_NO_CSE) { chars += printf("[ADDRMODE_NO_CSE]"); } break; case GT_LSH: if (tree->gtFlags & GTF_ADDRMODE_NO_CSE) { chars += printf("[ADDRMODE_NO_CSE]"); } break; case GT_MOD: case GT_UMOD: break; case GT_EQ: case GT_NE: case GT_LT: case GT_LE: case GT_GT: case GT_GE: if (tree->gtFlags & GTF_RELOP_NAN_UN) { chars += printf("[RELOP_NAN_UN]"); } if (tree->gtFlags & GTF_RELOP_JMP_USED) { chars += printf("[RELOP_JMP_USED]"); } break; case GT_QMARK: if (tree->gtFlags & GTF_QMARK_CAST_INSTOF) { chars += printf("[QMARK_CAST_INSTOF]"); } break; case GT_BOX: if (tree->gtFlags & GTF_BOX_VALUE) { chars += printf("[BOX_VALUE]"); } break; case GT_CNS_INT: { GenTreeFlags handleKind = (tree->gtFlags & GTF_ICON_HDL_MASK); switch (handleKind) { case GTF_ICON_SCOPE_HDL: chars += printf("[ICON_SCOPE_HDL]"); break; case GTF_ICON_CLASS_HDL: chars += printf("[ICON_CLASS_HDL]"); break; case GTF_ICON_METHOD_HDL: chars += printf("[ICON_METHOD_HDL]"); break; case GTF_ICON_FIELD_HDL: chars += printf("[ICON_FIELD_HDL]"); break; case GTF_ICON_STATIC_HDL: chars += printf("[ICON_STATIC_HDL]"); break; case GTF_ICON_STR_HDL: chars += printf("[ICON_STR_HDL]"); break; case GTF_ICON_CONST_PTR: chars += printf("[ICON_CONST_PTR]"); break; case GTF_ICON_GLOBAL_PTR: chars += printf("[ICON_GLOBAL_PTR]"); break; case GTF_ICON_VARG_HDL: chars += printf("[ICON_VARG_HDL]"); break; case GTF_ICON_PINVKI_HDL: chars += printf("[ICON_PINVKI_HDL]"); break; case GTF_ICON_TOKEN_HDL: chars += printf("[ICON_TOKEN_HDL]"); break; case GTF_ICON_TLS_HDL: chars += printf("[ICON_TLD_HDL]"); break; case GTF_ICON_FTN_ADDR: chars += printf("[ICON_FTN_ADDR]"); break; case GTF_ICON_CIDMID_HDL: chars += printf("[ICON_CIDMID_HDL]"); break; case GTF_ICON_BBC_PTR: chars += printf("[ICON_BBC_PTR]"); break; case GTF_ICON_STATIC_BOX_PTR: chars += printf("[GTF_ICON_STATIC_BOX_PTR]"); break; case GTF_ICON_FIELD_OFF: chars += printf("[ICON_FIELD_OFF]"); break; default: assert(!"a forgotten handle flag"); break; } } break; case GT_OBJ: case GT_STORE_OBJ: if (tree->AsObj()->GetLayout()->HasGCPtr()) { chars += printf("[BLK_HASGCPTR]"); } FALLTHROUGH; case GT_BLK: case GT_STORE_BLK: case GT_STORE_DYN_BLK: if (tree->gtFlags & GTF_BLK_VOLATILE) { chars += printf("[BLK_VOLATILE]"); } if (tree->AsBlk()->IsUnaligned()) { chars += printf("[BLK_UNALIGNED]"); } break; case GT_CALL: if (tree->gtFlags & GTF_CALL_UNMANAGED) { chars += printf("[CALL_UNMANAGED]"); } if (tree->gtFlags & GTF_CALL_INLINE_CANDIDATE) { chars += printf("[CALL_INLINE_CANDIDATE]"); } if (!tree->AsCall()->IsVirtual()) { chars += printf("[CALL_NONVIRT]"); } if (tree->AsCall()->IsVirtualVtable()) { chars += printf("[CALL_VIRT_VTABLE]"); } if (tree->AsCall()->IsVirtualStub()) { chars += printf("[CALL_VIRT_STUB]"); } if (tree->gtFlags & GTF_CALL_NULLCHECK) { chars += printf("[CALL_NULLCHECK]"); } if (tree->gtFlags & GTF_CALL_POP_ARGS) { chars += printf("[CALL_POP_ARGS]"); } if (tree->gtFlags & GTF_CALL_HOISTABLE) { chars += printf("[CALL_HOISTABLE]"); } // More flags associated with calls. 
{ GenTreeCall* call = tree->AsCall(); if (call->gtCallMoreFlags & GTF_CALL_M_EXPLICIT_TAILCALL) { chars += printf("[CALL_M_EXPLICIT_TAILCALL]"); } if (call->gtCallMoreFlags & GTF_CALL_M_TAILCALL) { chars += printf("[CALL_M_TAILCALL]"); } if (call->gtCallMoreFlags & GTF_CALL_M_VARARGS) { chars += printf("[CALL_M_VARARGS]"); } if (call->gtCallMoreFlags & GTF_CALL_M_RETBUFFARG) { chars += printf("[CALL_M_RETBUFFARG]"); } if (call->gtCallMoreFlags & GTF_CALL_M_DELEGATE_INV) { chars += printf("[CALL_M_DELEGATE_INV]"); } if (call->gtCallMoreFlags & GTF_CALL_M_NOGCCHECK) { chars += printf("[CALL_M_NOGCCHECK]"); } if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) { chars += printf("[CALL_M_SPECIAL_INTRINSIC]"); } if (call->IsUnmanaged()) { if (call->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL) { chars += printf("[CALL_M_UNMGD_THISCALL]"); } } else if (call->IsVirtualStub()) { if (call->gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT) { chars += printf("[CALL_M_VIRTSTUB_REL_INDIRECT]"); } } else if (!call->IsVirtual()) { if (call->gtCallMoreFlags & GTF_CALL_M_NONVIRT_SAME_THIS) { chars += printf("[CALL_M_NONVIRT_SAME_THIS]"); } } if (call->gtCallMoreFlags & GTF_CALL_M_FRAME_VAR_DEATH) { chars += printf("[CALL_M_FRAME_VAR_DEATH]"); } if (call->gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_JIT_HELPER) { chars += printf("[CALL_M_TAILCALL_VIA_JIT_HELPER]"); } #if FEATURE_TAILCALL_OPT if (call->gtCallMoreFlags & GTF_CALL_M_IMPLICIT_TAILCALL) { chars += printf("[CALL_M_IMPLICIT_TAILCALL]"); } #endif if (call->gtCallMoreFlags & GTF_CALL_M_PINVOKE) { chars += printf("[CALL_M_PINVOKE]"); } if (call->IsFatPointerCandidate()) { chars += printf("[CALL_FAT_POINTER_CANDIDATE]"); } if (call->IsGuarded()) { chars += printf("[CALL_GUARDED]"); } if (call->IsExpRuntimeLookup()) { chars += printf("[CALL_EXP_RUNTIME_LOOKUP]"); } } break; default: { GenTreeFlags flags = (tree->gtFlags & (~(GTF_COMMON_MASK | GTF_OVERFLOW))); if (flags != 0) { chars += printf("[%08X]", flags); } } break; } // Common flags. 
if (tree->gtFlags & GTF_ASG) { chars += printf("[ASG]"); } if (tree->gtFlags & GTF_CALL) { chars += printf("[CALL]"); } switch (op) { case GT_MUL: case GT_CAST: case GT_ADD: case GT_SUB: if (tree->gtFlags & GTF_OVERFLOW) { chars += printf("[OVERFLOW]"); } break; default: break; } if (tree->gtFlags & GTF_EXCEPT) { chars += printf("[EXCEPT]"); } if (tree->gtFlags & GTF_GLOB_REF) { chars += printf("[GLOB_REF]"); } if (tree->gtFlags & GTF_ORDER_SIDEEFF) { chars += printf("[ORDER_SIDEEFF]"); } if (tree->gtFlags & GTF_REVERSE_OPS) { if (op != GT_LCL_VAR) { chars += printf("[REVERSE_OPS]"); } } if (tree->gtFlags & GTF_SPILLED) { chars += printf("[SPILLED_OPER]"); } #if FEATURE_SET_FLAGS if (tree->gtFlags & GTF_SET_FLAGS) { if ((op != GT_IND) && (op != GT_STOREIND)) { chars += printf("[ZSF_SET_FLAGS]"); } } #endif if (tree->gtFlags & GTF_IND_NONFAULTING) { if (tree->OperIsIndirOrArrLength()) { chars += printf("[IND_NONFAULTING]"); } } if (tree->gtFlags & GTF_MAKE_CSE) { chars += printf("[MAKE_CSE]"); } if (tree->gtFlags & GTF_DONT_CSE) { chars += printf("[DONT_CSE]"); } if (tree->gtFlags & GTF_BOOLEAN) { chars += printf("[BOOLEAN]"); } if (tree->gtFlags & GTF_UNSIGNED) { chars += printf("[SMALL_UNSIGNED]"); } if (tree->gtFlags & GTF_LATE_ARG) { chars += printf("[SMALL_LATE_ARG]"); } if (tree->gtFlags & GTF_SPILL) { chars += printf("[SPILL]"); } if (tree->gtFlags & GTF_REUSE_REG_VAL) { if (op == GT_CNS_INT) { chars += printf("[REUSE_REG_VAL]"); } } } } void dTreeFlags(GenTree* tree) { cTreeFlags(JitTls::GetCompiler(), tree); } #endif // DEBUG #if VARSET_COUNTOPS // static BitSetSupport::BitSetOpCounter Compiler::m_varsetOpCounter("VarSetOpCounts.log"); #endif #if ALLVARSET_COUNTOPS // static BitSetSupport::BitSetOpCounter Compiler::m_allvarsetOpCounter("AllVarSetOpCounts.log"); #endif // static HelperCallProperties Compiler::s_helperCallProperties; /*****************************************************************************/ /*****************************************************************************/ //------------------------------------------------------------------------ // killGCRefs: // Given some tree node return does it need all GC refs to be spilled from // callee save registers. // // Arguments: // tree - the tree for which we ask about gc refs. // // Return Value: // true - tree kills GC refs on callee save registers // false - tree doesn't affect GC refs on callee save registers bool Compiler::killGCRefs(GenTree* tree) { if (tree->IsCall()) { GenTreeCall* call = tree->AsCall(); if (call->IsUnmanaged()) { return true; } if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_JIT_PINVOKE_BEGIN)) { assert(opts.ShouldUsePInvokeHelpers()); return true; } } else if (tree->OperIs(GT_START_PREEMPTGC)) { return true; } return false; } //------------------------------------------------------------------------ // lvaIsOSRLocal: check if this local var is one that requires special // treatment for OSR compilations. 
// // Arguments: // varNum - variable of interest // // Return Value: // true - this is an OSR compile and this local requires special treatment // false - not an OSR compile, or not an interesting local for OSR bool Compiler::lvaIsOSRLocal(unsigned varNum) { if (!opts.IsOSR()) { return false; } if (varNum < info.compLocalsCount) { return true; } LclVarDsc* varDsc = lvaGetDesc(varNum); if (varDsc->lvIsStructField) { return (varDsc->lvParentLcl < info.compLocalsCount); } return false; } //------------------------------------------------------------------------------ // gtTypeForNullCheck: helper to get the most optimal and correct type for nullcheck // // Arguments: // tree - the node for nullcheck; // var_types Compiler::gtTypeForNullCheck(GenTree* tree) { if (varTypeIsArithmetic(tree)) { #if defined(TARGET_XARCH) // Just an optimization for XARCH - smaller mov if (varTypeIsLong(tree)) { return TYP_INT; } #endif return tree->TypeGet(); } // for the rest: probe a single byte to avoid potential AVEs return TYP_BYTE; } //------------------------------------------------------------------------------ // gtChangeOperToNullCheck: helper to change tree oper to a NULLCHECK. // // Arguments: // tree - the node to change; // basicBlock - basic block of the node. // // Notes: // the function should not be called after lowering for platforms that do not support // emitting NULLCHECK nodes, like arm32. Use `Lowering::TransformUnusedIndirection` // that handles it and calls this function when appropriate. // void Compiler::gtChangeOperToNullCheck(GenTree* tree, BasicBlock* block) { assert(tree->OperIs(GT_FIELD, GT_IND, GT_OBJ, GT_BLK)); tree->ChangeOper(GT_NULLCHECK); tree->ChangeType(gtTypeForNullCheck(tree)); block->bbFlags |= BBF_HAS_NULLCHECK; optMethodFlags |= OMF_HAS_NULLCHECK; } #if defined(DEBUG) //------------------------------------------------------------------------------ // devirtualizationDetailToString: describe the detailed devirtualization reason // // Arguments: // detail - detail to describe // // Returns: // descriptive string // const char* Compiler::devirtualizationDetailToString(CORINFO_DEVIRTUALIZATION_DETAIL detail) { switch (detail) { case CORINFO_DEVIRTUALIZATION_UNKNOWN: return "unknown"; case CORINFO_DEVIRTUALIZATION_SUCCESS: return "success"; case CORINFO_DEVIRTUALIZATION_FAILED_CANON: return "object class was canonical"; case CORINFO_DEVIRTUALIZATION_FAILED_COM: return "object class was com"; case CORINFO_DEVIRTUALIZATION_FAILED_CAST: return "object class could not be cast to interface class"; case CORINFO_DEVIRTUALIZATION_FAILED_LOOKUP: return "interface method could not be found"; case CORINFO_DEVIRTUALIZATION_FAILED_DIM: return "interface method was default interface method"; case CORINFO_DEVIRTUALIZATION_FAILED_SUBCLASS: return "object not subclass of base class"; case CORINFO_DEVIRTUALIZATION_FAILED_SLOT: return "virtual method installed via explicit override"; case CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE: return "devirtualization crossed version bubble"; case CORINFO_DEVIRTUALIZATION_MULTIPLE_IMPL: return "object class has multiple implementations of interface"; case CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_CLASS_DECL: return "decl method is defined on class and decl method not in version bubble, and decl method not in " "type closest to version bubble"; case CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_INTERFACE_DECL: return "decl method is defined on interface and not in version bubble, and implementation type not " "entirely defined in bubble"; case 
CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_IMPL: return "object class not defined within version bubble"; case CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_IMPL_NOT_REFERENCEABLE: return "object class cannot be referenced from R2R code due to missing tokens"; case CORINFO_DEVIRTUALIZATION_FAILED_DUPLICATE_INTERFACE: return "crossgen2 virtual method algorithm and runtime algorithm differ in the presence of duplicate " "interface implementations"; case CORINFO_DEVIRTUALIZATION_FAILED_DECL_NOT_REPRESENTABLE: return "Decl method cannot be represented in R2R image"; default: return "undefined"; } } #endif // defined(DEBUG) #if TRACK_ENREG_STATS Compiler::EnregisterStats Compiler::s_enregisterStats; void Compiler::EnregisterStats::RecordLocal(const LclVarDsc* varDsc) { m_totalNumberOfVars++; if (varDsc->TypeGet() == TYP_STRUCT) { m_totalNumberOfStructVars++; } if (!varDsc->lvDoNotEnregister) { m_totalNumberOfEnregVars++; if (varDsc->TypeGet() == TYP_STRUCT) { m_totalNumberOfStructEnregVars++; } } else { switch (varDsc->GetDoNotEnregReason()) { case DoNotEnregisterReason::AddrExposed: m_addrExposed++; break; case DoNotEnregisterReason::HiddenBufferStructArg: m_hiddenStructArg++; break; case DoNotEnregisterReason::DontEnregStructs: m_dontEnregStructs++; break; case DoNotEnregisterReason::NotRegSizeStruct: m_notRegSizeStruct++; break; case DoNotEnregisterReason::LocalField: m_localField++; break; case DoNotEnregisterReason::VMNeedsStackAddr: m_VMNeedsStackAddr++; break; case DoNotEnregisterReason::LiveInOutOfHandler: m_liveInOutHndlr++; break; case DoNotEnregisterReason::BlockOp: m_blockOp++; break; case DoNotEnregisterReason::IsStructArg: m_structArg++; break; case DoNotEnregisterReason::DepField: m_depField++; break; case DoNotEnregisterReason::NoRegVars: m_noRegVars++; break; case DoNotEnregisterReason::MinOptsGC: m_minOptsGC++; break; #if !defined(TARGET_64BIT) case DoNotEnregisterReason::LongParamField: m_longParamField++; break; #endif #ifdef JIT32_GCENCODER case DoNotEnregisterReason::PinningRef: m_PinningRef++; break; #endif case DoNotEnregisterReason::LclAddrNode: m_lclAddrNode++; break; case DoNotEnregisterReason::CastTakesAddr: m_castTakesAddr++; break; case DoNotEnregisterReason::StoreBlkSrc: m_storeBlkSrc++; break; case DoNotEnregisterReason::OneAsgRetyping: m_oneAsgRetyping++; break; case DoNotEnregisterReason::SwizzleArg: m_swizzleArg++; break; case DoNotEnregisterReason::BlockOpRet: m_blockOpRet++; break; case DoNotEnregisterReason::ReturnSpCheck: m_returnSpCheck++; break; case DoNotEnregisterReason::SimdUserForcesDep: m_simdUserForcesDep++; break; default: unreached(); break; } if (varDsc->GetDoNotEnregReason() == DoNotEnregisterReason::AddrExposed) { // We can't `assert(IsAddressExposed())` because `fgAdjustForAddressExposedOrWrittenThis` // does not clear `m_doNotEnregReason` on `this`. 
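            // The switch below refines the AddrExposed bucket counted above: each such local has already
            // incremented m_addrExposed in the DoNotEnregisterReason switch and now increments exactly one
            // of the per-reason counters, e.g. a local rejected because of a wide indirection is counted
            // in both m_addrExposed and m_wideIndir.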
switch (varDsc->GetAddrExposedReason()) { case AddressExposedReason::PARENT_EXPOSED: m_parentExposed++; break; case AddressExposedReason::TOO_CONSERVATIVE: m_tooConservative++; break; case AddressExposedReason::ESCAPE_ADDRESS: m_escapeAddress++; break; case AddressExposedReason::WIDE_INDIR: m_wideIndir++; break; case AddressExposedReason::OSR_EXPOSED: m_osrExposed++; break; case AddressExposedReason::STRESS_LCL_FLD: m_stressLclFld++; break; case AddressExposedReason::COPY_FLD_BY_FLD: m_copyFldByFld++; break; case AddressExposedReason::DISPATCH_RET_BUF: m_dispatchRetBuf++; break; default: unreached(); break; } } } } void Compiler::EnregisterStats::Dump(FILE* fout) const { const unsigned totalNumberOfNotStructVars = s_enregisterStats.m_totalNumberOfVars - s_enregisterStats.m_totalNumberOfStructVars; const unsigned totalNumberOfNotStructEnregVars = s_enregisterStats.m_totalNumberOfEnregVars - s_enregisterStats.m_totalNumberOfStructEnregVars; const unsigned notEnreg = s_enregisterStats.m_totalNumberOfVars - s_enregisterStats.m_totalNumberOfEnregVars; fprintf(fout, "\nLocals enregistration statistics:\n"); if (m_totalNumberOfVars == 0) { fprintf(fout, "No locals to report.\n"); return; } fprintf(fout, "total number of locals: %d, number of enregistered: %d, notEnreg: %d, ratio: %.2f\n", m_totalNumberOfVars, m_totalNumberOfEnregVars, m_totalNumberOfVars - m_totalNumberOfEnregVars, (float)m_totalNumberOfEnregVars / m_totalNumberOfVars); if (m_totalNumberOfStructVars != 0) { fprintf(fout, "total number of struct locals: %d, number of enregistered: %d, notEnreg: %d, ratio: %.2f\n", m_totalNumberOfStructVars, m_totalNumberOfStructEnregVars, m_totalNumberOfStructVars - m_totalNumberOfStructEnregVars, (float)m_totalNumberOfStructEnregVars / m_totalNumberOfStructVars); } const unsigned numberOfPrimitiveLocals = totalNumberOfNotStructVars - totalNumberOfNotStructEnregVars; if (numberOfPrimitiveLocals != 0) { fprintf(fout, "total number of primitive locals: %d, number of enregistered: %d, notEnreg: %d, ratio: %.2f\n", totalNumberOfNotStructVars, totalNumberOfNotStructEnregVars, numberOfPrimitiveLocals, (float)totalNumberOfNotStructEnregVars / totalNumberOfNotStructVars); } if (notEnreg == 0) { fprintf(fout, "All locals are enregistered.\n"); return; } #define PRINT_STATS(stat, total) \ if (stat != 0) \ { \ fprintf(fout, #stat " %d, ratio: %.2f\n", stat, (float)stat / total); \ } PRINT_STATS(m_addrExposed, notEnreg); PRINT_STATS(m_hiddenStructArg, notEnreg); PRINT_STATS(m_dontEnregStructs, notEnreg); PRINT_STATS(m_notRegSizeStruct, notEnreg); PRINT_STATS(m_localField, notEnreg); PRINT_STATS(m_VMNeedsStackAddr, notEnreg); PRINT_STATS(m_liveInOutHndlr, notEnreg); PRINT_STATS(m_blockOp, notEnreg); PRINT_STATS(m_structArg, notEnreg); PRINT_STATS(m_depField, notEnreg); PRINT_STATS(m_noRegVars, notEnreg); PRINT_STATS(m_minOptsGC, notEnreg); #if !defined(TARGET_64BIT) PRINT_STATS(m_longParamField, notEnreg); #endif // !TARGET_64BIT #ifdef JIT32_GCENCODER PRINT_STATS(m_PinningRef, notEnreg); #endif // JIT32_GCENCODER PRINT_STATS(m_lclAddrNode, notEnreg); PRINT_STATS(m_castTakesAddr, notEnreg); PRINT_STATS(m_storeBlkSrc, notEnreg); PRINT_STATS(m_oneAsgRetyping, notEnreg); PRINT_STATS(m_swizzleArg, notEnreg); PRINT_STATS(m_blockOpRet, notEnreg); PRINT_STATS(m_returnSpCheck, notEnreg); PRINT_STATS(m_simdUserForcesDep, notEnreg); fprintf(fout, "\nAddr exposed details:\n"); if (m_addrExposed == 0) { fprintf(fout, "\nNo address exposed locals to report.\n"); return; } PRINT_STATS(m_parentExposed, m_addrExposed); 
PRINT_STATS(m_tooConservative, m_addrExposed); PRINT_STATS(m_escapeAddress, m_addrExposed); PRINT_STATS(m_wideIndir, m_addrExposed); PRINT_STATS(m_osrExposed, m_addrExposed); PRINT_STATS(m_stressLclFld, m_addrExposed); PRINT_STATS(m_copyFldByFld, m_addrExposed); PRINT_STATS(m_dispatchRetBuf, m_addrExposed); } #endif // TRACK_ENREG_STATS
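// For reference, the PRINT_STATS helper used above stringizes its first argument, so a call such as
//     PRINT_STATS(m_addrExposed, notEnreg);
// expands (roughly) to
//     if (m_addrExposed != 0)
//     {
//         fprintf(fout, "m_addrExposed %d, ratio: %.2f\n", m_addrExposed, (float)m_addrExposed / notEnreg);
//     }
// so each line of the dump reads "<counter> <count>, ratio: <count / total>".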
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Compiler XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif // _MSC_VER #include "hostallocator.h" #include "emit.h" #include "ssabuilder.h" #include "valuenum.h" #include "rangecheck.h" #include "lower.h" #include "stacklevelsetter.h" #include "jittelemetry.h" #include "patchpointinfo.h" #include "jitstd/algorithm.h" extern ICorJitHost* g_jitHost; #if defined(DEBUG) // Column settings for COMPlus_JitDumpIR. We could(should) make these programmable. #define COLUMN_OPCODE 30 #define COLUMN_OPERANDS (COLUMN_OPCODE + 25) #define COLUMN_KINDS 110 #define COLUMN_FLAGS (COLUMN_KINDS + 32) #endif #if defined(DEBUG) unsigned Compiler::jitTotalMethodCompiled = 0; #endif // defined(DEBUG) #if defined(DEBUG) LONG Compiler::jitNestingLevel = 0; #endif // defined(DEBUG) // static bool Compiler::s_pAltJitExcludeAssembliesListInitialized = false; AssemblyNamesList2* Compiler::s_pAltJitExcludeAssembliesList = nullptr; #ifdef DEBUG // static bool Compiler::s_pJitDisasmIncludeAssembliesListInitialized = false; AssemblyNamesList2* Compiler::s_pJitDisasmIncludeAssembliesList = nullptr; // static bool Compiler::s_pJitFunctionFileInitialized = false; MethodSet* Compiler::s_pJitMethodSet = nullptr; #endif // DEBUG #ifdef CONFIGURABLE_ARM_ABI // static bool GlobalJitOptions::compFeatureHfa = false; LONG GlobalJitOptions::compUseSoftFPConfigured = 0; #endif // CONFIGURABLE_ARM_ABI /***************************************************************************** * * Little helpers to grab the current cycle counter value; this is done * differently based on target architecture, host toolchain, etc. The * main thing is to keep the overhead absolutely minimal; in fact, on * x86/x64 we use RDTSC even though it's not thread-safe; GetThreadCycles * (which is monotonous) is just too expensive. */ #ifdef FEATURE_JIT_METHOD_PERF #if defined(HOST_X86) || defined(HOST_AMD64) #if defined(_MSC_VER) #include <intrin.h> inline bool _our_GetThreadCycles(unsigned __int64* cycleOut) { *cycleOut = __rdtsc(); return true; } #elif defined(__GNUC__) inline bool _our_GetThreadCycles(unsigned __int64* cycleOut) { uint32_t hi, lo; __asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi)); *cycleOut = (static_cast<unsigned __int64>(hi) << 32) | static_cast<unsigned __int64>(lo); return true; } #else // neither _MSC_VER nor __GNUC__ // The following *might* work - might as well try. #define _our_GetThreadCycles(cp) GetThreadCycles(cp) #endif #elif defined(HOST_ARM) || defined(HOST_ARM64) // If this doesn't work please see ../gc/gc.cpp for additional ARM // info (and possible solutions). #define _our_GetThreadCycles(cp) GetThreadCycles(cp) #else // not x86/x64 and not ARM // Don't know what this target is, but let's give it a try; if // someone really wants to make this work, please add the right // code here. 
#define _our_GetThreadCycles(cp) GetThreadCycles(cp) #endif // which host OS const BYTE genTypeSizes[] = { #define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) sz, #include "typelist.h" #undef DEF_TP }; const BYTE genTypeAlignments[] = { #define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) al, #include "typelist.h" #undef DEF_TP }; const BYTE genTypeStSzs[] = { #define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) st, #include "typelist.h" #undef DEF_TP }; const BYTE genActualTypes[] = { #define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) jitType, #include "typelist.h" #undef DEF_TP }; #endif // FEATURE_JIT_METHOD_PERF /*****************************************************************************/ inline unsigned getCurTime() { SYSTEMTIME tim; GetSystemTime(&tim); return (((tim.wHour * 60) + tim.wMinute) * 60 + tim.wSecond) * 1000 + tim.wMilliseconds; } /*****************************************************************************/ #ifdef DEBUG /*****************************************************************************/ static FILE* jitSrcFilePtr; static unsigned jitCurSrcLine; void Compiler::JitLogEE(unsigned level, const char* fmt, ...) { va_list args; if (verbose) { va_start(args, fmt); vflogf(jitstdout, fmt, args); va_end(args); } va_start(args, fmt); vlogf(level, fmt, args); va_end(args); } #endif // DEBUG /*****************************************************************************/ #if defined(DEBUG) || MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || DISPLAY_SIZES || CALL_ARG_STATS static unsigned genMethodCnt; // total number of methods JIT'ted unsigned genMethodICnt; // number of interruptible methods unsigned genMethodNCnt; // number of non-interruptible methods static unsigned genSmallMethodsNeedingExtraMemoryCnt = 0; #endif /*****************************************************************************/ #if MEASURE_NODE_SIZE NodeSizeStats genNodeSizeStats; NodeSizeStats genNodeSizeStatsPerFunc; unsigned genTreeNcntHistBuckets[] = {10, 20, 30, 40, 50, 100, 200, 300, 400, 500, 1000, 5000, 10000, 0}; Histogram genTreeNcntHist(genTreeNcntHistBuckets); unsigned genTreeNsizHistBuckets[] = {1000, 5000, 10000, 50000, 100000, 500000, 1000000, 0}; Histogram genTreeNsizHist(genTreeNsizHistBuckets); #endif // MEASURE_NODE_SIZE /*****************************************************************************/ #if MEASURE_MEM_ALLOC unsigned memAllocHistBuckets[] = {64, 128, 192, 256, 512, 1024, 4096, 8192, 0}; Histogram memAllocHist(memAllocHistBuckets); unsigned memUsedHistBuckets[] = {16, 32, 64, 128, 192, 256, 512, 1024, 4096, 8192, 0}; Histogram memUsedHist(memUsedHistBuckets); #endif // MEASURE_MEM_ALLOC /***************************************************************************** * * Variables to keep track of total code amounts. */ #if DISPLAY_SIZES size_t grossVMsize; // Total IL code size size_t grossNCsize; // Native code + data size size_t totalNCsize; // Native code + data + GC info size (TODO-Cleanup: GC info size only accurate for JIT32_GCENCODER) size_t gcHeaderISize; // GC header size: interruptible methods size_t gcPtrMapISize; // GC pointer map size: interruptible methods size_t gcHeaderNSize; // GC header size: non-interruptible methods size_t gcPtrMapNSize; // GC pointer map size: non-interruptible methods #endif // DISPLAY_SIZES /***************************************************************************** * * Variables to keep track of argument counts. 
*/ #if CALL_ARG_STATS unsigned argTotalCalls; unsigned argHelperCalls; unsigned argStaticCalls; unsigned argNonVirtualCalls; unsigned argVirtualCalls; unsigned argTotalArgs; // total number of args for all calls (including objectPtr) unsigned argTotalDWordArgs; unsigned argTotalLongArgs; unsigned argTotalFloatArgs; unsigned argTotalDoubleArgs; unsigned argTotalRegArgs; unsigned argTotalTemps; unsigned argTotalLclVar; unsigned argTotalDeferred; unsigned argTotalConst; unsigned argTotalObjPtr; unsigned argTotalGTF_ASGinArgs; unsigned argMaxTempsPerMethod; unsigned argCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0}; Histogram argCntTable(argCntBuckets); unsigned argDWordCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0}; Histogram argDWordCntTable(argDWordCntBuckets); unsigned argDWordLngCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0}; Histogram argDWordLngCntTable(argDWordLngCntBuckets); unsigned argTempsCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0}; Histogram argTempsCntTable(argTempsCntBuckets); #endif // CALL_ARG_STATS /***************************************************************************** * * Variables to keep track of basic block counts. */ #if COUNT_BASIC_BLOCKS // -------------------------------------------------- // Basic block count frequency table: // -------------------------------------------------- // <= 1 ===> 26872 count ( 56% of total) // 2 .. 2 ===> 669 count ( 58% of total) // 3 .. 3 ===> 4687 count ( 68% of total) // 4 .. 5 ===> 5101 count ( 78% of total) // 6 .. 10 ===> 5575 count ( 90% of total) // 11 .. 20 ===> 3028 count ( 97% of total) // 21 .. 50 ===> 1108 count ( 99% of total) // 51 .. 100 ===> 182 count ( 99% of total) // 101 .. 1000 ===> 34 count (100% of total) // 1001 .. 10000 ===> 0 count (100% of total) // -------------------------------------------------- unsigned bbCntBuckets[] = {1, 2, 3, 5, 10, 20, 50, 100, 1000, 10000, 0}; Histogram bbCntTable(bbCntBuckets); /* Histogram for the IL opcode size of methods with a single basic block */ unsigned bbSizeBuckets[] = {1, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 0}; Histogram bbOneBBSizeTable(bbSizeBuckets); #endif // COUNT_BASIC_BLOCKS /***************************************************************************** * * Used by optFindNaturalLoops to gather statistical information such as * - total number of natural loops * - number of loops with 1, 2, ... exit conditions * - number of loops that have an iterator (for like) * - number of loops that have a constant iterator */ #if COUNT_LOOPS unsigned totalLoopMethods; // counts the total number of methods that have natural loops unsigned maxLoopsPerMethod; // counts the maximum number of loops a method has unsigned totalLoopOverflows; // # of methods that identified more loops than we can represent unsigned totalLoopCount; // counts the total number of natural loops unsigned totalUnnatLoopCount; // counts the total number of (not-necessarily natural) loops unsigned totalUnnatLoopOverflows; // # of methods that identified more unnatural loops than we can represent unsigned iterLoopCount; // counts the # of loops with an iterator (for like) unsigned constIterLoopCount; // counts the # of loops with a constant iterator (for like) bool hasMethodLoops; // flag to keep track if we already counted a method as having loops unsigned loopsThisMethod; // counts the number of loops in the current method bool loopOverflowThisMethod; // True if we exceeded the max # of loops in the method. 
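// Note: as with the other bucket tables in this file (e.g. genTreeNcntHistBuckets, argCntBuckets),
// the histogram bucket arrays below list ascending bucket bounds and end with a trailing 0, which
// serves as the end-of-table marker for the Histogram helper.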
/* Histogram for number of loops in a method */ unsigned loopCountBuckets[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0}; Histogram loopCountTable(loopCountBuckets); /* Histogram for number of loop exits */ unsigned loopExitCountBuckets[] = {0, 1, 2, 3, 4, 5, 6, 0}; Histogram loopExitCountTable(loopExitCountBuckets); #endif // COUNT_LOOPS //------------------------------------------------------------------------ // getJitGCType: Given the VM's CorInfoGCType convert it to the JIT's var_types // // Arguments: // gcType - an enum value that originally came from an element // of the BYTE[] returned from getClassGClayout() // // Return Value: // The corresponsing enum value from the JIT's var_types // // Notes: // The gcLayout of each field of a struct is returned from getClassGClayout() // as a BYTE[] but each BYTE element is actually a CorInfoGCType value // Note when we 'know' that there is only one element in theis array // the JIT will often pass the address of a single BYTE, instead of a BYTE[] // var_types Compiler::getJitGCType(BYTE gcType) { var_types result = TYP_UNKNOWN; CorInfoGCType corInfoType = (CorInfoGCType)gcType; if (corInfoType == TYPE_GC_NONE) { result = TYP_I_IMPL; } else if (corInfoType == TYPE_GC_REF) { result = TYP_REF; } else if (corInfoType == TYPE_GC_BYREF) { result = TYP_BYREF; } else { noway_assert(!"Bad value of 'gcType'"); } return result; } #ifdef TARGET_X86 //--------------------------------------------------------------------------- // isTrivialPointerSizedStruct: // Check if the given struct type contains only one pointer-sized integer value type // // Arguments: // clsHnd - the handle for the struct type. // // Return Value: // true if the given struct type contains only one pointer-sized integer value type, // false otherwise. // bool Compiler::isTrivialPointerSizedStruct(CORINFO_CLASS_HANDLE clsHnd) const { assert(info.compCompHnd->isValueClass(clsHnd)); if (info.compCompHnd->getClassSize(clsHnd) != TARGET_POINTER_SIZE) { return false; } for (;;) { // all of class chain must be of value type and must have only one field if (!info.compCompHnd->isValueClass(clsHnd) || info.compCompHnd->getClassNumInstanceFields(clsHnd) != 1) { return false; } CORINFO_CLASS_HANDLE* pClsHnd = &clsHnd; CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); CorInfoType fieldType = info.compCompHnd->getFieldType(fldHnd, pClsHnd); var_types vt = JITtype2varType(fieldType); if (fieldType == CORINFO_TYPE_VALUECLASS) { clsHnd = *pClsHnd; } else if (varTypeIsI(vt) && !varTypeIsGC(vt)) { return true; } else { return false; } } } #endif // TARGET_X86 //--------------------------------------------------------------------------- // isNativePrimitiveStructType: // Check if the given struct type is an intrinsic type that should be treated as though // it is not a struct at the unmanaged ABI boundary. // // Arguments: // clsHnd - the handle for the struct type. // // Return Value: // true if the given struct type should be treated as a primitive for unmanaged calls, // false otherwise. 
// bool Compiler::isNativePrimitiveStructType(CORINFO_CLASS_HANDLE clsHnd) { if (!isIntrinsicType(clsHnd)) { return false; } const char* namespaceName = nullptr; const char* typeName = getClassNameFromMetadata(clsHnd, &namespaceName); if (strcmp(namespaceName, "System.Runtime.InteropServices") != 0) { return false; } return strcmp(typeName, "CLong") == 0 || strcmp(typeName, "CULong") == 0 || strcmp(typeName, "NFloat") == 0; } //----------------------------------------------------------------------------- // getPrimitiveTypeForStruct: // Get the "primitive" type that is is used for a struct // of size 'structSize'. // We examine 'clsHnd' to check the GC layout of the struct and // return TYP_REF for structs that simply wrap an object. // If the struct is a one element HFA/HVA, we will return the // proper floating point or vector type. // // Arguments: // structSize - the size of the struct type, cannot be zero // clsHnd - the handle for the struct type, used when may have // an HFA or if we need the GC layout for an object ref. // // Return Value: // The primitive type (i.e. byte, short, int, long, ref, float, double) // used to pass or return structs of this size. // If we shouldn't use a "primitive" type then TYP_UNKNOWN is returned. // Notes: // For 32-bit targets (X86/ARM32) the 64-bit TYP_LONG type is not // considered a primitive type by this method. // So a struct that wraps a 'long' is passed and returned in the // same way as any other 8-byte struct // For ARM32 if we have an HFA struct that wraps a 64-bit double // we will return TYP_DOUBLE. // For vector calling conventions, a vector is considered a "primitive" // type, as it is passed in a single register. // var_types Compiler::getPrimitiveTypeForStruct(unsigned structSize, CORINFO_CLASS_HANDLE clsHnd, bool isVarArg) { assert(structSize != 0); var_types useType = TYP_UNKNOWN; // Start by determining if we have an HFA/HVA with a single element. if (GlobalJitOptions::compFeatureHfa) { // Arm64 Windows VarArg methods arguments will not classify HFA types, they will need to be treated // as if they are not HFA types. if (!(TargetArchitecture::IsArm64 && TargetOS::IsWindows && isVarArg)) { switch (structSize) { case 4: case 8: #ifdef TARGET_ARM64 case 16: #endif // TARGET_ARM64 { var_types hfaType = GetHfaType(clsHnd); // We're only interested in the case where the struct size is equal to the size of the hfaType. if (varTypeIsValidHfaType(hfaType)) { if (genTypeSize(hfaType) == structSize) { useType = hfaType; } else { return TYP_UNKNOWN; } } } } if (useType != TYP_UNKNOWN) { return useType; } } } // Now deal with non-HFA/HVA structs. switch (structSize) { case 1: useType = TYP_BYTE; break; case 2: useType = TYP_SHORT; break; #if !defined(TARGET_XARCH) || defined(UNIX_AMD64_ABI) case 3: useType = TYP_INT; break; #endif // !TARGET_XARCH || UNIX_AMD64_ABI #ifdef TARGET_64BIT case 4: // We dealt with the one-float HFA above. All other 4-byte structs are handled as INT. 
useType = TYP_INT; break; #if !defined(TARGET_XARCH) || defined(UNIX_AMD64_ABI) case 5: case 6: case 7: useType = TYP_I_IMPL; break; #endif // !TARGET_XARCH || UNIX_AMD64_ABI #endif // TARGET_64BIT case TARGET_POINTER_SIZE: { BYTE gcPtr = 0; // Check if this pointer-sized struct is wrapping a GC object info.compCompHnd->getClassGClayout(clsHnd, &gcPtr); useType = getJitGCType(gcPtr); } break; default: useType = TYP_UNKNOWN; break; } return useType; } //----------------------------------------------------------------------------- // getArgTypeForStruct: // Get the type that is used to pass values of the given struct type. // If you have already retrieved the struct size then it should be // passed as the optional fourth argument, as this allows us to avoid // an extra call to getClassSize(clsHnd) // // Arguments: // clsHnd - the handle for the struct type // wbPassStruct - An "out" argument with information about how // the struct is to be passed // isVarArg - is vararg, used to ignore HFA types for Arm64 windows varargs // structSize - the size of the struct type, // or zero if we should call getClassSize(clsHnd) // // Return Value: // For wbPassStruct you can pass a 'nullptr' and nothing will be written // or returned for that out parameter. // When *wbPassStruct is SPK_PrimitiveType this method's return value // is the primitive type used to pass the struct. // When *wbPassStruct is SPK_ByReference this method's return value // is always TYP_UNKNOWN and the struct type is passed by reference to a copy // When *wbPassStruct is SPK_ByValue or SPK_ByValueAsHfa this method's return value // is always TYP_STRUCT and the struct type is passed by value either // using multiple registers or on the stack. // // Assumptions: // The size must be the size of the given type. // The given class handle must be for a value type (struct). // // Notes: // About HFA types: // When the clsHnd is a one element HFA type we return the appropriate // floating point primitive type and *wbPassStruct is SPK_PrimitiveType // If there are two or more elements in the HFA type then the this method's // return value is TYP_STRUCT and *wbPassStruct is SPK_ByValueAsHfa // var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, structPassingKind* wbPassStruct, bool isVarArg, unsigned structSize) { var_types useType = TYP_UNKNOWN; structPassingKind howToPassStruct = SPK_Unknown; // We must change this before we return assert(structSize != 0); // Determine if we can pass the struct as a primitive type. // Note that on x86 we only pass specific pointer-sized structs that satisfy isTrivialPointerSizedStruct checks. #ifndef TARGET_X86 #ifdef UNIX_AMD64_ABI // An 8-byte struct may need to be passed in a floating point register // So we always consult the struct "Classifier" routine // SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; eeGetSystemVAmd64PassStructInRegisterDescriptor(clsHnd, &structDesc); if (structDesc.passedInRegisters && (structDesc.eightByteCount != 1)) { // We can't pass this as a primitive type. } else if (structDesc.eightByteClassifications[0] == SystemVClassificationTypeSSE) { // If this is passed as a floating type, use that. // Otherwise, we'll use the general case - we don't want to use the "EightByteType" // directly, because it returns `TYP_INT` for any integral type <= 4 bytes, and // we need to preserve small types. 
useType = GetEightByteType(structDesc, 0); } else #endif // UNIX_AMD64_ABI // The largest arg passed in a single register is MAX_PASS_SINGLEREG_BYTES, // so we can skip calling getPrimitiveTypeForStruct when we // have a struct that is larger than that. // if (structSize <= MAX_PASS_SINGLEREG_BYTES) { // We set the "primitive" useType based upon the structSize // and also examine the clsHnd to see if it is an HFA of count one useType = getPrimitiveTypeForStruct(structSize, clsHnd, isVarArg); } #else if (isTrivialPointerSizedStruct(clsHnd)) { useType = TYP_I_IMPL; } #endif // !TARGET_X86 // Did we change this struct type into a simple "primitive" type? // if (useType != TYP_UNKNOWN) { // Yes, we should use the "primitive" type in 'useType' howToPassStruct = SPK_PrimitiveType; } else // We can't replace the struct with a "primitive" type { // See if we can pass this struct by value, possibly in multiple registers // or if we should pass it by reference to a copy // if (structSize <= MAX_PASS_MULTIREG_BYTES) { // Structs that are HFA/HVA's are passed by value in multiple registers. // Arm64 Windows VarArg methods arguments will not classify HFA/HVA types, they will need to be treated // as if they are not HFA/HVA types. var_types hfaType; if (TargetArchitecture::IsArm64 && TargetOS::IsWindows && isVarArg) { hfaType = TYP_UNDEF; } else { hfaType = GetHfaType(clsHnd); } if (varTypeIsValidHfaType(hfaType)) { // HFA's of count one should have been handled by getPrimitiveTypeForStruct assert(GetHfaCount(clsHnd) >= 2); // setup wbPassType and useType indicate that this is passed by value as an HFA // using multiple registers // (when all of the parameters registers are used, then the stack will be used) howToPassStruct = SPK_ByValueAsHfa; useType = TYP_STRUCT; } else // Not an HFA struct type { #ifdef UNIX_AMD64_ABI // The case of (structDesc.eightByteCount == 1) should have already been handled if ((structDesc.eightByteCount > 1) || !structDesc.passedInRegisters) { // setup wbPassType and useType indicate that this is passed by value in multiple registers // (when all of the parameters registers are used, then the stack will be used) howToPassStruct = SPK_ByValue; useType = TYP_STRUCT; } else { assert(structDesc.eightByteCount == 0); // Otherwise we pass this struct by reference to a copy // setup wbPassType and useType indicate that this is passed using one register // (by reference to a copy) howToPassStruct = SPK_ByReference; useType = TYP_UNKNOWN; } #elif defined(TARGET_ARM64) // Structs that are pointer sized or smaller should have been handled by getPrimitiveTypeForStruct assert(structSize > TARGET_POINTER_SIZE); // On ARM64 structs that are 9-16 bytes are passed by value in multiple registers // if (structSize <= (TARGET_POINTER_SIZE * 2)) { // setup wbPassType and useType indicate that this is passed by value in multiple registers // (when all of the parameters registers are used, then the stack will be used) howToPassStruct = SPK_ByValue; useType = TYP_STRUCT; } else // a structSize that is 17-32 bytes in size { // Otherwise we pass this struct by reference to a copy // setup wbPassType and useType indicate that this is passed using one register // (by reference to a copy) howToPassStruct = SPK_ByReference; useType = TYP_UNKNOWN; } #elif defined(TARGET_X86) || defined(TARGET_ARM) // Otherwise we pass this struct by value on the stack // setup wbPassType and useType indicate that this is passed by value according to the X86/ARM32 ABI howToPassStruct = SPK_ByValue; useType = TYP_STRUCT; 
#else // TARGET_XXX noway_assert(!"Unhandled TARGET in getArgTypeForStruct (with FEATURE_MULTIREG_ARGS=1)"); #endif // TARGET_XXX } } else // (structSize > MAX_PASS_MULTIREG_BYTES) { // We have a (large) struct that can't be replaced with a "primitive" type // and can't be passed in multiple registers CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_X86) || defined(TARGET_ARM) || defined(UNIX_AMD64_ABI) // Otherwise we pass this struct by value on the stack // setup wbPassType and useType indicate that this is passed by value according to the X86/ARM32 ABI howToPassStruct = SPK_ByValue; useType = TYP_STRUCT; #elif defined(TARGET_AMD64) || defined(TARGET_ARM64) // Otherwise we pass this struct by reference to a copy // setup wbPassType and useType indicate that this is passed using one register (by reference to a copy) howToPassStruct = SPK_ByReference; useType = TYP_UNKNOWN; #else // TARGET_XXX noway_assert(!"Unhandled TARGET in getArgTypeForStruct"); #endif // TARGET_XXX } } // 'howToPassStruct' must be set to one of the valid values before we return assert(howToPassStruct != SPK_Unknown); if (wbPassStruct != nullptr) { *wbPassStruct = howToPassStruct; } return useType; } //----------------------------------------------------------------------------- // getReturnTypeForStruct: // Get the type that is used to return values of the given struct type. // If you have already retrieved the struct size then it should be // passed as the optional third argument, as this allows us to avoid // an extra call to getClassSize(clsHnd) // // Arguments: // clsHnd - the handle for the struct type // callConv - the calling convention of the function // that returns this struct. // wbReturnStruct - An "out" argument with information about how // the struct is to be returned // structSize - the size of the struct type, // or zero if we should call getClassSize(clsHnd) // // Return Value: // For wbReturnStruct you can pass a 'nullptr' and nothing will be written // or returned for that out parameter. // When *wbReturnStruct is SPK_PrimitiveType this method's return value // is the primitive type used to return the struct. // When *wbReturnStruct is SPK_ByReference this method's return value // is always TYP_UNKNOWN and the struct type is returned using a return buffer // When *wbReturnStruct is SPK_ByValue or SPK_ByValueAsHfa this method's return value // is always TYP_STRUCT and the struct type is returned using multiple registers. // // Assumptions: // The size must be the size of the given type. // The given class handle must be for a value type (struct). // // Notes: // About HFA types: // When the clsHnd is a one element HFA type then this method's return // value is the appropriate floating point primitive type and // *wbReturnStruct is SPK_PrimitiveType. // If there are two or more elements in the HFA type and the target supports // multireg return types then the return value is TYP_STRUCT and // *wbReturnStruct is SPK_ByValueAsHfa. // Additionally if there are two or more elements in the HFA type and // the target doesn't support multreg return types then it is treated // as if it wasn't an HFA type. // About returning TYP_STRUCT: // Whenever this method's return value is TYP_STRUCT it always means // that multiple registers are used to return this struct. 
// var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, CorInfoCallConvExtension callConv, structPassingKind* wbReturnStruct /* = nullptr */, unsigned structSize /* = 0 */) { var_types useType = TYP_UNKNOWN; structPassingKind howToReturnStruct = SPK_Unknown; // We must change this before we return bool canReturnInRegister = true; assert(clsHnd != NO_CLASS_HANDLE); if (structSize == 0) { structSize = info.compCompHnd->getClassSize(clsHnd); } assert(structSize > 0); #ifdef UNIX_AMD64_ABI // An 8-byte struct may need to be returned in a floating point register // So we always consult the struct "Classifier" routine // SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; eeGetSystemVAmd64PassStructInRegisterDescriptor(clsHnd, &structDesc); if (structDesc.eightByteCount == 1) { assert(structSize <= sizeof(double)); assert(structDesc.passedInRegisters); if (structDesc.eightByteClassifications[0] == SystemVClassificationTypeSSE) { // If this is returned as a floating type, use that. // Otherwise, leave as TYP_UNKONWN and we'll sort things out below. useType = GetEightByteType(structDesc, 0); howToReturnStruct = SPK_PrimitiveType; } } else { // Return classification is not always size based... canReturnInRegister = structDesc.passedInRegisters; if (!canReturnInRegister) { assert(structDesc.eightByteCount == 0); howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } } #elif UNIX_X86_ABI if (callConv != CorInfoCallConvExtension::Managed && !isNativePrimitiveStructType(clsHnd)) { canReturnInRegister = false; howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } #endif if (TargetOS::IsWindows && !TargetArchitecture::IsArm32 && callConvIsInstanceMethodCallConv(callConv) && !isNativePrimitiveStructType(clsHnd)) { canReturnInRegister = false; howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } // Check for cases where a small struct is returned in a register // via a primitive type. // // The largest "primitive type" is MAX_PASS_SINGLEREG_BYTES // so we can skip calling getPrimitiveTypeForStruct when we // have a struct that is larger than that. if (canReturnInRegister && (useType == TYP_UNKNOWN) && (structSize <= MAX_PASS_SINGLEREG_BYTES)) { // We set the "primitive" useType based upon the structSize // and also examine the clsHnd to see if it is an HFA of count one // // The ABI for struct returns in varArg methods, is same as the normal case, // so pass false for isVararg useType = getPrimitiveTypeForStruct(structSize, clsHnd, /*isVararg=*/false); if (useType != TYP_UNKNOWN) { if (structSize == genTypeSize(useType)) { // Currently: 1, 2, 4, or 8 byte structs howToReturnStruct = SPK_PrimitiveType; } else { // Currently: 3, 5, 6, or 7 byte structs assert(structSize < genTypeSize(useType)); howToReturnStruct = SPK_EnclosingType; } } } #ifdef TARGET_64BIT // Note this handles an odd case when FEATURE_MULTIREG_RET is disabled and HFAs are enabled // // getPrimitiveTypeForStruct will return TYP_UNKNOWN for a struct that is an HFA of two floats // because when HFA are enabled, normally we would use two FP registers to pass or return it // // But if we don't have support for multiple register return types, we have to change this. // Since what we have is an 8-byte struct (float + float) we change useType to TYP_I_IMPL // so that the struct is returned instead using an 8-byte integer register. 
    //
    if ((FEATURE_MULTIREG_RET == 0) && (useType == TYP_UNKNOWN) && (structSize == (2 * sizeof(float))) && IsHfa(clsHnd))
    {
        useType           = TYP_I_IMPL;
        howToReturnStruct = SPK_PrimitiveType;
    }
#endif

    // Did we change this struct type into a simple "primitive" type?
    if (useType != TYP_UNKNOWN)
    {
        // If so, we should have already set howToReturnStruct, too.
        assert(howToReturnStruct != SPK_Unknown);
    }
    else if (canReturnInRegister) // We can't replace the struct with a "primitive" type
    {
        // See if we can return this struct by value, possibly in multiple registers
        // or if we should return it using a return buffer register
        //
        if ((FEATURE_MULTIREG_RET == 1) && (structSize <= MAX_RET_MULTIREG_BYTES))
        {
            // Structs that are HFA's are returned in multiple registers
            if (IsHfa(clsHnd))
            {
                // HFA's of count one should have been handled by getPrimitiveTypeForStruct
                assert(GetHfaCount(clsHnd) >= 2);

                // setup wbPassType and useType indicate that this is returned by value as an HFA
                // using multiple registers
                howToReturnStruct = SPK_ByValueAsHfa;
                useType           = TYP_STRUCT;
            }
            else // Not an HFA struct type
            {
#ifdef UNIX_AMD64_ABI
                // The cases of (structDesc.eightByteCount == 1) and (structDesc.eightByteCount == 0)
                // should have already been handled
                assert(structDesc.eightByteCount > 1);
                // setup wbPassType and useType indicate that this is returned by value in multiple registers
                howToReturnStruct = SPK_ByValue;
                useType           = TYP_STRUCT;
                assert(structDesc.passedInRegisters == true);
#elif defined(TARGET_ARM64)
                // Structs that are pointer sized or smaller should have been handled by getPrimitiveTypeForStruct
                assert(structSize > TARGET_POINTER_SIZE);

                // On ARM64 structs that are 9-16 bytes are returned by value in multiple registers
                //
                if (structSize <= (TARGET_POINTER_SIZE * 2))
                {
                    // setup wbPassType and useType indicate that this is returned by value in multiple registers
                    howToReturnStruct = SPK_ByValue;
                    useType           = TYP_STRUCT;
                }
                else // a structSize that is 17-32 bytes in size
                {
                    // Otherwise we return this struct using a return buffer
                    // setup wbPassType and useType indicate that this is returned using a return buffer register
                    // (reference to a return buffer)
                    howToReturnStruct = SPK_ByReference;
                    useType           = TYP_UNKNOWN;
                }
#elif defined(TARGET_X86)
                // Only 8-byte structs are returned in multiple registers.
                // We also only support multireg struct returns on x86 to match the native calling convention.
                // So return 8-byte structs only when the calling convention is a native calling convention.
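                // For example, with a native (unmanaged) calling convention an 8-byte struct can come back
                // in a register pair (typically EDX:EAX), whereas under the managed calling convention the
                // same struct is returned through a return buffer instead.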
if (structSize == MAX_RET_MULTIREG_BYTES && callConv != CorInfoCallConvExtension::Managed) { // setup wbPassType and useType indicate that this is return by value in multiple registers howToReturnStruct = SPK_ByValue; useType = TYP_STRUCT; } else { // Otherwise we return this struct using a return buffer // setup wbPassType and useType indicate that this is returned using a return buffer register // (reference to a return buffer) howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } #elif defined(TARGET_ARM) // Otherwise we return this struct using a return buffer // setup wbPassType and useType indicate that this is returned using a return buffer register // (reference to a return buffer) howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; #else // TARGET_XXX noway_assert(!"Unhandled TARGET in getReturnTypeForStruct (with FEATURE_MULTIREG_ARGS=1)"); #endif // TARGET_XXX } } else // (structSize > MAX_RET_MULTIREG_BYTES) || (FEATURE_MULTIREG_RET == 0) { // We have a (large) struct that can't be replaced with a "primitive" type // and can't be returned in multiple registers // We return this struct using a return buffer register // setup wbPassType and useType indicate that this is returned using a return buffer register // (reference to a return buffer) howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } } // 'howToReturnStruct' must be set to one of the valid values before we return assert(howToReturnStruct != SPK_Unknown); if (wbReturnStruct != nullptr) { *wbReturnStruct = howToReturnStruct; } return useType; } /////////////////////////////////////////////////////////////////////////////// // // MEASURE_NOWAY: code to measure and rank dynamic occurrences of noway_assert. // (Just the appearances of noway_assert, whether the assert is true or false.) // This might help characterize the cost of noway_assert in non-DEBUG builds, // or determine which noway_assert should be simple DEBUG-only asserts. // /////////////////////////////////////////////////////////////////////////////// #if MEASURE_NOWAY struct FileLine { char* m_file; unsigned m_line; char* m_condStr; FileLine() : m_file(nullptr), m_line(0), m_condStr(nullptr) { } FileLine(const char* file, unsigned line, const char* condStr) : m_line(line) { size_t newSize = (strlen(file) + 1) * sizeof(char); m_file = HostAllocator::getHostAllocator().allocate<char>(newSize); strcpy_s(m_file, newSize, file); newSize = (strlen(condStr) + 1) * sizeof(char); m_condStr = HostAllocator::getHostAllocator().allocate<char>(newSize); strcpy_s(m_condStr, newSize, condStr); } FileLine(const FileLine& other) { m_file = other.m_file; m_line = other.m_line; m_condStr = other.m_condStr; } // GetHashCode() and Equals() are needed by JitHashTable static unsigned GetHashCode(FileLine fl) { assert(fl.m_file != nullptr); unsigned code = fl.m_line; for (const char* p = fl.m_file; *p != '\0'; p++) { code += *p; } // Could also add condStr. 
return code; } static bool Equals(FileLine fl1, FileLine fl2) { return (fl1.m_line == fl2.m_line) && (0 == strcmp(fl1.m_file, fl2.m_file)); } }; typedef JitHashTable<FileLine, FileLine, size_t, HostAllocator> FileLineToCountMap; FileLineToCountMap* NowayAssertMap; void Compiler::RecordNowayAssert(const char* filename, unsigned line, const char* condStr) { if (NowayAssertMap == nullptr) { NowayAssertMap = new (HostAllocator::getHostAllocator()) FileLineToCountMap(HostAllocator::getHostAllocator()); } FileLine fl(filename, line, condStr); size_t* pCount = NowayAssertMap->LookupPointer(fl); if (pCount == nullptr) { NowayAssertMap->Set(fl, 1); } else { ++(*pCount); } } void RecordNowayAssertGlobal(const char* filename, unsigned line, const char* condStr) { if ((JitConfig.JitMeasureNowayAssert() == 1) && (JitTls::GetCompiler() != nullptr)) { JitTls::GetCompiler()->RecordNowayAssert(filename, line, condStr); } } struct NowayAssertCountMap { size_t count; FileLine fl; NowayAssertCountMap() : count(0) { } struct compare { bool operator()(const NowayAssertCountMap& elem1, const NowayAssertCountMap& elem2) { return (ssize_t)elem2.count < (ssize_t)elem1.count; // sort in descending order } }; }; void DisplayNowayAssertMap() { if (NowayAssertMap != nullptr) { FILE* fout; LPCWSTR strJitMeasureNowayAssertFile = JitConfig.JitMeasureNowayAssertFile(); if (strJitMeasureNowayAssertFile != nullptr) { fout = _wfopen(strJitMeasureNowayAssertFile, W("a")); if (fout == nullptr) { fprintf(jitstdout, "Failed to open JitMeasureNowayAssertFile \"%ws\"\n", strJitMeasureNowayAssertFile); return; } } else { fout = jitstdout; } // Iterate noway assert map, create sorted table by occurrence, dump it. unsigned count = NowayAssertMap->GetCount(); NowayAssertCountMap* nacp = new NowayAssertCountMap[count]; unsigned i = 0; for (FileLineToCountMap::KeyIterator iter = NowayAssertMap->Begin(), end = NowayAssertMap->End(); !iter.Equal(end); ++iter) { nacp[i].count = iter.GetValue(); nacp[i].fl = iter.Get(); ++i; } jitstd::sort(nacp, nacp + count, NowayAssertCountMap::compare()); if (fout == jitstdout) { // Don't output the header if writing to a file, since we'll be appending to existing dumps in that case. fprintf(fout, "\nnoway_assert counts:\n"); fprintf(fout, "count, file, line, text\n"); } for (i = 0; i < count; i++) { fprintf(fout, "%u, %s, %u, \"%s\"\n", nacp[i].count, nacp[i].fl.m_file, nacp[i].fl.m_line, nacp[i].fl.m_condStr); } if (fout != jitstdout) { fclose(fout); fout = nullptr; } } } #endif // MEASURE_NOWAY /***************************************************************************** * variables to keep track of how many iterations we go in a dataflow pass */ #if DATAFLOW_ITER unsigned CSEiterCount; // counts the # of iteration for the CSE dataflow unsigned CFiterCount; // counts the # of iteration for the Const Folding dataflow #endif // DATAFLOW_ITER #if MEASURE_BLOCK_SIZE size_t genFlowNodeSize; size_t genFlowNodeCnt; #endif // MEASURE_BLOCK_SIZE /*****************************************************************************/ // We keep track of methods we've already compiled. 
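// Note on the MEASURE_NOWAY machinery above: the intent is that every dynamic noway_assert
// occurrence funnels through RecordNowayAssertGlobal, conceptually along the lines of
//     RecordNowayAssertGlobal(__FILE__, __LINE__, "expr");   // hypothetical call site
// Each occurrence is keyed by source file and line in NowayAssertMap (the condition text is kept
// for reporting only), and DisplayNowayAssertMap() dumps the counts, sorted in descending order,
// at JIT shutdown.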
/***************************************************************************** * Declare the statics */ #ifdef DEBUG /* static */ LONG Compiler::s_compMethodsCount = 0; // to produce unique label names #endif #if MEASURE_MEM_ALLOC /* static */ bool Compiler::s_dspMemStats = false; #endif #ifndef PROFILING_SUPPORTED const bool Compiler::Options::compNoPInvokeInlineCB = false; #endif /***************************************************************************** * * One time initialization code */ /* static */ void Compiler::compStartup() { #if DISPLAY_SIZES grossVMsize = grossNCsize = totalNCsize = 0; #endif // DISPLAY_SIZES /* Initialize the table of tree node sizes */ GenTree::InitNodeSize(); #ifdef JIT32_GCENCODER // Initialize the GC encoder lookup table GCInfo::gcInitEncoderLookupTable(); #endif /* Initialize the emitter */ emitter::emitInit(); // Static vars of ValueNumStore ValueNumStore::InitValueNumStoreStatics(); compDisplayStaticSizes(jitstdout); } /***************************************************************************** * * One time finalization code */ /* static */ void Compiler::compShutdown() { if (s_pAltJitExcludeAssembliesList != nullptr) { s_pAltJitExcludeAssembliesList->~AssemblyNamesList2(); // call the destructor s_pAltJitExcludeAssembliesList = nullptr; } #ifdef DEBUG if (s_pJitDisasmIncludeAssembliesList != nullptr) { s_pJitDisasmIncludeAssembliesList->~AssemblyNamesList2(); // call the destructor s_pJitDisasmIncludeAssembliesList = nullptr; } #endif // DEBUG #if MEASURE_NOWAY DisplayNowayAssertMap(); #endif // MEASURE_NOWAY /* Shut down the emitter */ emitter::emitDone(); #if defined(DEBUG) || defined(INLINE_DATA) // Finish reading and/or writing inline xml if (JitConfig.JitInlineDumpXmlFile() != nullptr) { FILE* file = _wfopen(JitConfig.JitInlineDumpXmlFile(), W("a")); if (file != nullptr) { InlineStrategy::FinalizeXml(file); fclose(file); } else { InlineStrategy::FinalizeXml(); } } #endif // defined(DEBUG) || defined(INLINE_DATA) #if defined(DEBUG) || MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || DISPLAY_SIZES || CALL_ARG_STATS if (genMethodCnt == 0) { return; } #endif #if NODEBASH_STATS GenTree::ReportOperBashing(jitstdout); #endif // Where should we write our statistics output? FILE* fout = jitstdout; #ifdef FEATURE_JIT_METHOD_PERF if (compJitTimeLogFilename != nullptr) { FILE* jitTimeLogFile = _wfopen(compJitTimeLogFilename, W("a")); if (jitTimeLogFile != nullptr) { CompTimeSummaryInfo::s_compTimeSummary.Print(jitTimeLogFile); fclose(jitTimeLogFile); } } JitTimer::Shutdown(); #endif // FEATURE_JIT_METHOD_PERF #if COUNT_AST_OPERS // Add up all the counts so that we can show percentages of total unsigned totalCount = 0; for (unsigned op = 0; op < GT_COUNT; op++) { totalCount += GenTree::s_gtNodeCounts[op]; } if (totalCount > 0) { struct OperInfo { unsigned Count; unsigned Size; genTreeOps Oper; }; OperInfo opers[GT_COUNT]; for (unsigned op = 0; op < GT_COUNT; op++) { opers[op] = {GenTree::s_gtNodeCounts[op], GenTree::s_gtTrueSizes[op], static_cast<genTreeOps>(op)}; } jitstd::sort(opers, opers + ArrLen(opers), [](const OperInfo& l, const OperInfo& r) { // We'll be sorting in descending order. 
return l.Count >= r.Count; }); unsigned remainingCount = totalCount; unsigned remainingCountLarge = 0; unsigned remainingCountSmall = 0; unsigned countLarge = 0; unsigned countSmall = 0; fprintf(fout, "\nGenTree operator counts (approximate):\n\n"); for (OperInfo oper : opers) { unsigned size = oper.Size; unsigned count = oper.Count; double percentage = 100.0 * count / totalCount; if (size > TREE_NODE_SZ_SMALL) { countLarge += count; } else { countSmall += count; } // Let's not show anything below a threshold if (percentage >= 0.5) { fprintf(fout, " GT_%-17s %7u (%4.1lf%%) %3u bytes each\n", GenTree::OpName(oper.Oper), count, percentage, size); remainingCount -= count; } else { if (size > TREE_NODE_SZ_SMALL) { remainingCountLarge += count; } else { remainingCountSmall += count; } } } if (remainingCount > 0) { fprintf(fout, " All other GT_xxx ... %7u (%4.1lf%%) ... %4.1lf%% small + %4.1lf%% large\n", remainingCount, 100.0 * remainingCount / totalCount, 100.0 * remainingCountSmall / totalCount, 100.0 * remainingCountLarge / totalCount); } fprintf(fout, " -----------------------------------------------------\n"); fprintf(fout, " Total ....... %11u --ALL-- ... %4.1lf%% small + %4.1lf%% large\n", totalCount, 100.0 * countSmall / totalCount, 100.0 * countLarge / totalCount); fprintf(fout, "\n"); } #endif // COUNT_AST_OPERS #if DISPLAY_SIZES if (grossVMsize && grossNCsize) { fprintf(fout, "\n"); fprintf(fout, "--------------------------------------\n"); fprintf(fout, "Function and GC info size stats\n"); fprintf(fout, "--------------------------------------\n"); fprintf(fout, "[%7u VM, %8u %6s %4u%%] %s\n", grossVMsize, grossNCsize, Target::g_tgtCPUName, 100 * grossNCsize / grossVMsize, "Total (excluding GC info)"); fprintf(fout, "[%7u VM, %8u %6s %4u%%] %s\n", grossVMsize, totalNCsize, Target::g_tgtCPUName, 100 * totalNCsize / grossVMsize, "Total (including GC info)"); if (gcHeaderISize || gcHeaderNSize) { fprintf(fout, "\n"); fprintf(fout, "GC tables : [%7uI,%7uN] %7u byt (%u%% of IL, %u%% of %s).\n", gcHeaderISize + gcPtrMapISize, gcHeaderNSize + gcPtrMapNSize, totalNCsize - grossNCsize, 100 * (totalNCsize - grossNCsize) / grossVMsize, 100 * (totalNCsize - grossNCsize) / grossNCsize, Target::g_tgtCPUName); fprintf(fout, "GC headers : [%7uI,%7uN] %7u byt, [%4.1fI,%4.1fN] %4.1f byt/meth\n", gcHeaderISize, gcHeaderNSize, gcHeaderISize + gcHeaderNSize, (float)gcHeaderISize / (genMethodICnt + 0.001), (float)gcHeaderNSize / (genMethodNCnt + 0.001), (float)(gcHeaderISize + gcHeaderNSize) / genMethodCnt); fprintf(fout, "GC ptr maps : [%7uI,%7uN] %7u byt, [%4.1fI,%4.1fN] %4.1f byt/meth\n", gcPtrMapISize, gcPtrMapNSize, gcPtrMapISize + gcPtrMapNSize, (float)gcPtrMapISize / (genMethodICnt + 0.001), (float)gcPtrMapNSize / (genMethodNCnt + 0.001), (float)(gcPtrMapISize + gcPtrMapNSize) / genMethodCnt); } else { fprintf(fout, "\n"); fprintf(fout, "GC tables take up %u bytes (%u%% of instr, %u%% of %6s code).\n", totalNCsize - grossNCsize, 100 * (totalNCsize - grossNCsize) / grossVMsize, 100 * (totalNCsize - grossNCsize) / grossNCsize, Target::g_tgtCPUName); } #ifdef DEBUG #if DOUBLE_ALIGN fprintf(fout, "%u out of %u methods generated with double-aligned stack\n", Compiler::s_lvaDoubleAlignedProcsCount, genMethodCnt); #endif #endif } #endif // DISPLAY_SIZES #if CALL_ARG_STATS compDispCallArgStats(fout); #endif #if COUNT_BASIC_BLOCKS fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Basic block count frequency table:\n"); fprintf(fout, 
"--------------------------------------------------\n"); bbCntTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "\n"); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "IL method size frequency table for methods with a single basic block:\n"); fprintf(fout, "--------------------------------------------------\n"); bbOneBBSizeTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); #endif // COUNT_BASIC_BLOCKS #if COUNT_LOOPS fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Loop stats\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Total number of methods with loops is %5u\n", totalLoopMethods); fprintf(fout, "Total number of loops is %5u\n", totalLoopCount); fprintf(fout, "Maximum number of loops per method is %5u\n", maxLoopsPerMethod); fprintf(fout, "# of methods overflowing nat loop table is %5u\n", totalLoopOverflows); fprintf(fout, "Total number of 'unnatural' loops is %5u\n", totalUnnatLoopCount); fprintf(fout, "# of methods overflowing unnat loop limit is %5u\n", totalUnnatLoopOverflows); fprintf(fout, "Total number of loops with an iterator is %5u\n", iterLoopCount); fprintf(fout, "Total number of loops with a constant iterator is %5u\n", constIterLoopCount); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Loop count frequency table:\n"); fprintf(fout, "--------------------------------------------------\n"); loopCountTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Loop exit count frequency table:\n"); fprintf(fout, "--------------------------------------------------\n"); loopExitCountTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); #endif // COUNT_LOOPS #if DATAFLOW_ITER fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Total number of iterations in the CSE dataflow loop is %5u\n", CSEiterCount); fprintf(fout, "Total number of iterations in the CF dataflow loop is %5u\n", CFiterCount); #endif // DATAFLOW_ITER #if MEASURE_NODE_SIZE fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "GenTree node allocation stats\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Allocated %6I64u tree nodes (%7I64u bytes total, avg %4I64u bytes per method)\n", genNodeSizeStats.genTreeNodeCnt, genNodeSizeStats.genTreeNodeSize, genNodeSizeStats.genTreeNodeSize / genMethodCnt); fprintf(fout, "Allocated %7I64u bytes of unused tree node space (%3.2f%%)\n", genNodeSizeStats.genTreeNodeSize - genNodeSizeStats.genTreeNodeActualSize, (float)(100 * (genNodeSizeStats.genTreeNodeSize - genNodeSizeStats.genTreeNodeActualSize)) / genNodeSizeStats.genTreeNodeSize); fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Distribution of per-method GenTree node counts:\n"); genTreeNcntHist.dump(fout); fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Distribution of per-method GenTree node allocations (in bytes):\n"); genTreeNsizHist.dump(fout); #endif // MEASURE_NODE_SIZE #if MEASURE_BLOCK_SIZE fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "BasicBlock and 
flowList/BasicBlockList allocation stats\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Allocated %6u basic blocks (%7u bytes total, avg %4u bytes per method)\n", BasicBlock::s_Count, BasicBlock::s_Size, BasicBlock::s_Size / genMethodCnt); fprintf(fout, "Allocated %6u flow nodes (%7u bytes total, avg %4u bytes per method)\n", genFlowNodeCnt, genFlowNodeSize, genFlowNodeSize / genMethodCnt); #endif // MEASURE_BLOCK_SIZE #if MEASURE_MEM_ALLOC if (s_dspMemStats) { fprintf(fout, "\nAll allocations:\n"); ArenaAllocator::dumpAggregateMemStats(jitstdout); fprintf(fout, "\nLargest method:\n"); ArenaAllocator::dumpMaxMemStats(jitstdout); fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Distribution of total memory allocated per method (in KB):\n"); memAllocHist.dump(fout); fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Distribution of total memory used per method (in KB):\n"); memUsedHist.dump(fout); } #endif // MEASURE_MEM_ALLOC #if LOOP_HOIST_STATS #ifdef DEBUG // Always display loop stats in retail if (JitConfig.DisplayLoopHoistStats() != 0) #endif // DEBUG { PrintAggregateLoopHoistStats(jitstdout); } #endif // LOOP_HOIST_STATS #if TRACK_ENREG_STATS if (JitConfig.JitEnregStats() != 0) { s_enregisterStats.Dump(fout); } #endif // TRACK_ENREG_STATS #if MEASURE_PTRTAB_SIZE fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "GC pointer table stats\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Reg pointer descriptor size (internal): %8u (avg %4u per method)\n", GCInfo::s_gcRegPtrDscSize, GCInfo::s_gcRegPtrDscSize / genMethodCnt); fprintf(fout, "Total pointer table size: %8u (avg %4u per method)\n", GCInfo::s_gcTotalPtrTabSize, GCInfo::s_gcTotalPtrTabSize / genMethodCnt); #endif // MEASURE_PTRTAB_SIZE #if MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || MEASURE_PTRTAB_SIZE || DISPLAY_SIZES if (genMethodCnt != 0) { fprintf(fout, "\n"); fprintf(fout, "A total of %6u methods compiled", genMethodCnt); #if DISPLAY_SIZES if (genMethodICnt || genMethodNCnt) { fprintf(fout, " (%u interruptible, %u non-interruptible)", genMethodICnt, genMethodNCnt); } #endif // DISPLAY_SIZES fprintf(fout, ".\n"); } #endif // MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || MEASURE_PTRTAB_SIZE || DISPLAY_SIZES #if EMITTER_STATS emitterStats(fout); #endif #if MEASURE_FATAL fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Fatal errors stats\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, " badCode: %u\n", fatal_badCode); fprintf(fout, " noWay: %u\n", fatal_noWay); fprintf(fout, " implLimitation: %u\n", fatal_implLimitation); fprintf(fout, " NOMEM: %u\n", fatal_NOMEM); fprintf(fout, " noWayAssertBody: %u\n", fatal_noWayAssertBody); #ifdef DEBUG fprintf(fout, " noWayAssertBodyArgs: %u\n", fatal_noWayAssertBodyArgs); #endif // DEBUG fprintf(fout, " NYI: %u\n", fatal_NYI); #endif // MEASURE_FATAL } /***************************************************************************** * Display static data structure sizes. 
*/ /* static */ void Compiler::compDisplayStaticSizes(FILE* fout) { #if MEASURE_NODE_SIZE GenTree::DumpNodeSizes(fout); #endif #if EMITTER_STATS emitterStaticStats(fout); #endif } /***************************************************************************** * * Constructor */ void Compiler::compInit(ArenaAllocator* pAlloc, CORINFO_METHOD_HANDLE methodHnd, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, InlineInfo* inlineInfo) { assert(pAlloc); compArenaAllocator = pAlloc; // Inlinee Compile object will only be allocated when needed for the 1st time. InlineeCompiler = nullptr; // Set the inline info. impInlineInfo = inlineInfo; info.compCompHnd = compHnd; info.compMethodHnd = methodHnd; info.compMethodInfo = methodInfo; #ifdef DEBUG bRangeAllowStress = false; #endif #if defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS // Initialize the method name and related info, as it is used early in determining whether to // apply stress modes, and which ones to apply. // Note that even allocating memory can invoke the stress mechanism, so ensure that both // 'compMethodName' and 'compFullName' are either null or valid before we allocate. // (The stress mode checks references these prior to checking bRangeAllowStress.) // info.compMethodName = nullptr; info.compClassName = nullptr; info.compFullName = nullptr; const char* classNamePtr; const char* methodName; methodName = eeGetMethodName(methodHnd, &classNamePtr); unsigned len = (unsigned)roundUp(strlen(classNamePtr) + 1); info.compClassName = getAllocator(CMK_DebugOnly).allocate<char>(len); info.compMethodName = methodName; strcpy_s((char*)info.compClassName, len, classNamePtr); info.compFullName = eeGetMethodFullName(methodHnd); info.compPerfScore = 0.0; info.compMethodSuperPMIIndex = g_jitHost->getIntConfigValue(W("SuperPMIMethodContextNumber"), -1); #endif // defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS #if defined(DEBUG) || defined(INLINE_DATA) info.compMethodHashPrivate = 0; #endif // defined(DEBUG) || defined(INLINE_DATA) #ifdef DEBUG // Opt-in to jit stress based on method hash ranges. // // Note the default (with JitStressRange not set) is that all // methods will be subject to stress. static ConfigMethodRange fJitStressRange; fJitStressRange.EnsureInit(JitConfig.JitStressRange()); assert(!fJitStressRange.Error()); bRangeAllowStress = fJitStressRange.Contains(info.compMethodHash()); #endif // DEBUG eeInfoInitialized = false; compDoAggressiveInlining = false; if (compIsForInlining()) { m_inlineStrategy = nullptr; compInlineResult = inlineInfo->inlineResult; } else { m_inlineStrategy = new (this, CMK_Inlining) InlineStrategy(this); compInlineResult = nullptr; } // Initialize this to the first phase to run. mostRecentlyActivePhase = PHASE_PRE_IMPORT; // Initially, no phase checks are active. activePhaseChecks = PhaseChecks::CHECK_NONE; #ifdef FEATURE_TRACELOGGING // Make sure JIT telemetry is initialized as soon as allocations can be made // but no later than a point where noway_asserts can be thrown. // 1. JIT telemetry could allocate some objects internally. // 2. NowayAsserts are tracked through telemetry. // Note: JIT telemetry could gather data when compiler is not fully initialized. // So you have to initialize the compiler variables you use for telemetry. 
assert((unsigned)PHASE_PRE_IMPORT == 0); info.compILCodeSize = 0; info.compMethodHnd = nullptr; compJitTelemetry.Initialize(this); #endif fgInit(); lvaInit(); if (!compIsForInlining()) { codeGen = getCodeGenerator(this); optInit(); hashBv::Init(this); compVarScopeMap = nullptr; // If this method were a real constructor for Compiler, these would // become method initializations. impPendingBlockMembers = JitExpandArray<BYTE>(getAllocator()); impSpillCliquePredMembers = JitExpandArray<BYTE>(getAllocator()); impSpillCliqueSuccMembers = JitExpandArray<BYTE>(getAllocator()); new (&genIPmappings, jitstd::placement_t()) jitstd::list<IPmappingDsc>(getAllocator(CMK_DebugInfo)); #ifdef DEBUG new (&genPreciseIPmappings, jitstd::placement_t()) jitstd::list<PreciseIPMapping>(getAllocator(CMK_DebugOnly)); #endif lvMemoryPerSsaData = SsaDefArray<SsaMemDef>(); // // Initialize all the per-method statistics gathering data structures. // optLoopsCloned = 0; #if LOOP_HOIST_STATS m_loopsConsidered = 0; m_curLoopHasHoistedExpression = false; m_loopsWithHoistedExpressions = 0; m_totalHoistedExpressions = 0; #endif // LOOP_HOIST_STATS #if MEASURE_NODE_SIZE genNodeSizeStatsPerFunc.Init(); #endif // MEASURE_NODE_SIZE } else { codeGen = nullptr; } compJmpOpUsed = false; compLongUsed = false; compTailCallUsed = false; compTailPrefixSeen = false; compLocallocSeen = false; compLocallocUsed = false; compLocallocOptimized = false; compQmarkRationalized = false; compQmarkUsed = false; compFloatingPointUsed = false; compSuppressedZeroInit = false; compNeedsGSSecurityCookie = false; compGSReorderStackLayout = false; compGeneratingProlog = false; compGeneratingEpilog = false; compLSRADone = false; compRationalIRForm = false; #ifdef DEBUG compCodeGenDone = false; opts.compMinOptsIsUsed = false; #endif opts.compMinOptsIsSet = false; // Used by fgFindJumpTargets for inlining heuristics. opts.instrCount = 0; // Used to track when we should consider running EarlyProp optMethodFlags = 0; optNoReturnCallCount = 0; #ifdef DEBUG m_nodeTestData = nullptr; m_loopHoistCSEClass = FIRST_LOOP_HOIST_CSE_CLASS; #endif m_switchDescMap = nullptr; m_blockToEHPreds = nullptr; m_fieldSeqStore = nullptr; m_zeroOffsetFieldMap = nullptr; m_arrayInfoMap = nullptr; m_refAnyClass = nullptr; for (MemoryKind memoryKind : allMemoryKinds()) { m_memorySsaMap[memoryKind] = nullptr; } #ifdef DEBUG if (!compIsForInlining()) { compDoComponentUnitTestsOnce(); } #endif // DEBUG vnStore = nullptr; m_opAsgnVarDefSsaNums = nullptr; m_nodeToLoopMemoryBlockMap = nullptr; fgSsaPassesCompleted = 0; fgVNPassesCompleted = 0; // check that HelperCallProperties are initialized assert(s_helperCallProperties.IsPure(CORINFO_HELP_GETSHARED_GCSTATIC_BASE)); assert(!s_helperCallProperties.IsPure(CORINFO_HELP_GETFIELDOBJ)); // quick sanity check // We start with the flow graph in tree-order fgOrder = FGOrderTree; m_classLayoutTable = nullptr; #ifdef FEATURE_SIMD m_simdHandleCache = nullptr; #endif // FEATURE_SIMD compUsesThrowHelper = false; } /***************************************************************************** * * Destructor */ void Compiler::compDone() { } void* Compiler::compGetHelperFtn(CorInfoHelpFunc ftnNum, /* IN */ void** ppIndirection) /* OUT */ { void* addr; if (info.compMatchedVM) { addr = info.compCompHnd->getHelperFtn(ftnNum, ppIndirection); } else { // If we don't have a matched VM, we won't get valid results when asking for a helper function. 
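// Instead, hand back an easily recognizable sentinel address so that any accidental use of it stands out when inspecting the generated code.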
addr = UlongToPtr(0xCA11CA11); // "callcall" } return addr; } unsigned Compiler::compGetTypeSize(CorInfoType cit, CORINFO_CLASS_HANDLE clsHnd) { var_types sigType = genActualType(JITtype2varType(cit)); unsigned sigSize; sigSize = genTypeSize(sigType); if (cit == CORINFO_TYPE_VALUECLASS) { sigSize = info.compCompHnd->getClassSize(clsHnd); } else if (cit == CORINFO_TYPE_REFANY) { sigSize = 2 * TARGET_POINTER_SIZE; } return sigSize; } #ifdef DEBUG static bool DidComponentUnitTests = false; void Compiler::compDoComponentUnitTestsOnce() { if (!JitConfig.RunComponentUnitTests()) { return; } if (!DidComponentUnitTests) { DidComponentUnitTests = true; ValueNumStore::RunTests(this); BitSetSupport::TestSuite(getAllocatorDebugOnly()); } } //------------------------------------------------------------------------ // compGetJitDefaultFill: // // Return Value: // An unsigned char value used to initialize memory allocated by the JIT. // The default value is taken from COMPLUS_JitDefaultFill; if it is not set // the value will be 0xdd. When JitStress is active a random value based // on the method hash is used. // // Notes: // Note that we can't use small values like zero, because we have some // asserts that can fire for such values. // // static unsigned char Compiler::compGetJitDefaultFill(Compiler* comp) { unsigned char defaultFill = (unsigned char)JitConfig.JitDefaultFill(); if (comp != nullptr && comp->compStressCompile(STRESS_GENERIC_VARN, 50)) { unsigned temp; temp = comp->info.compMethodHash(); temp = (temp >> 16) ^ temp; temp = (temp >> 8) ^ temp; temp = temp & 0xff; // asserts like this: assert(!IsUninitialized(stkLvl)); // mean that small values for defaultFill are problematic // so we make the value larger in that case. if (temp < 0x20) { temp |= 0x80; } // Make a misaligned pointer value to reduce the probability of getting a valid value and firing // assert(!IsUninitialized(pointer)). temp |= 0x1; defaultFill = (unsigned char)temp; } return defaultFill; } #endif // DEBUG /*****************************************************************************/ #ifdef DEBUG /*****************************************************************************/ VarName Compiler::compVarName(regNumber reg, bool isFloatReg) { if (isFloatReg) { assert(genIsValidFloatReg(reg)); } else { assert(genIsValidReg(reg)); } if ((info.compVarScopesCount > 0) && compCurBB && opts.varNames) { unsigned lclNum; LclVarDsc* varDsc; /* Look for the matching register */ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { /* If the variable is not in a register, or not in the register we're looking for, quit. */ /* Also, if it is a compiler generated variable (i.e. slot# > info.compVarScopesCount), don't bother. 
*/ if ((varDsc->lvRegister != 0) && (varDsc->GetRegNum() == reg) && (varDsc->lvSlotNum < info.compVarScopesCount)) { /* check if variable in that register is live */ if (VarSetOps::IsMember(this, compCurLife, varDsc->lvVarIndex)) { /* variable is live - find the corresponding slot */ VarScopeDsc* varScope = compFindLocalVar(varDsc->lvSlotNum, compCurBB->bbCodeOffs, compCurBB->bbCodeOffsEnd); if (varScope) { return varScope->vsdName; } } } } } return nullptr; } const char* Compiler::compRegVarName(regNumber reg, bool displayVar, bool isFloatReg) { #ifdef TARGET_ARM isFloatReg = genIsValidFloatReg(reg); #endif if (displayVar && (reg != REG_NA)) { VarName varName = compVarName(reg, isFloatReg); if (varName) { const int NAME_VAR_REG_BUFFER_LEN = 4 + 256 + 1; static char nameVarReg[2][NAME_VAR_REG_BUFFER_LEN]; // to avoid overwriting the buffer when have 2 // consecutive calls before printing static int index = 0; // for circular index into the name array index = (index + 1) % 2; // circular reuse of index sprintf_s(nameVarReg[index], NAME_VAR_REG_BUFFER_LEN, "%s'%s'", getRegName(reg), VarNameToStr(varName)); return nameVarReg[index]; } } /* no debug info required or no variable in that register -> return standard name */ return getRegName(reg); } const char* Compiler::compRegNameForSize(regNumber reg, size_t size) { if (size == 0 || size >= 4) { return compRegVarName(reg, true); } // clang-format off static const char * sizeNames[][2] = { { "al", "ax" }, { "cl", "cx" }, { "dl", "dx" }, { "bl", "bx" }, #ifdef TARGET_AMD64 { "spl", "sp" }, // ESP { "bpl", "bp" }, // EBP { "sil", "si" }, // ESI { "dil", "di" }, // EDI { "r8b", "r8w" }, { "r9b", "r9w" }, { "r10b", "r10w" }, { "r11b", "r11w" }, { "r12b", "r12w" }, { "r13b", "r13w" }, { "r14b", "r14w" }, { "r15b", "r15w" }, #endif // TARGET_AMD64 }; // clang-format on assert(isByteReg(reg)); assert(genRegMask(reg) & RBM_BYTE_REGS); assert(size == 1 || size == 2); return sizeNames[reg][size - 1]; } const char* Compiler::compLocalVarName(unsigned varNum, unsigned offs) { unsigned i; VarScopeDsc* t; for (i = 0, t = info.compVarScopes; i < info.compVarScopesCount; i++, t++) { if (t->vsdVarNum != varNum) { continue; } if (offs >= t->vsdLifeBeg && offs < t->vsdLifeEnd) { return VarNameToStr(t->vsdName); } } return nullptr; } /*****************************************************************************/ #endif // DEBUG /*****************************************************************************/ void Compiler::compSetProcessor() { // // NOTE: This function needs to be kept in sync with EEJitManager::SetCpuInfo() in vm\codeman.cpp // const JitFlags& jitFlags = *opts.jitFlags; #if defined(TARGET_ARM) info.genCPU = CPU_ARM; #elif defined(TARGET_ARM64) info.genCPU = CPU_ARM64; #elif defined(TARGET_AMD64) info.genCPU = CPU_X64; #elif defined(TARGET_X86) if (jitFlags.IsSet(JitFlags::JIT_FLAG_TARGET_P4)) info.genCPU = CPU_X86_PENTIUM_4; else info.genCPU = CPU_X86; #endif // // Processor specific optimizations // CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_AMD64 opts.compUseCMOV = true; #elif defined(TARGET_X86) opts.compUseCMOV = jitFlags.IsSet(JitFlags::JIT_FLAG_USE_CMOV); #ifdef DEBUG if (opts.compUseCMOV) opts.compUseCMOV = !compStressCompile(STRESS_USE_CMOV, 50); #endif // DEBUG #endif // TARGET_X86 // The VM will set the ISA flags depending on actual hardware support // and any specified config switches specified by the user. The exception // here is for certain "artificial ISAs" such as Vector64/128/256 where they // don't actually exist. 
The JIT is in charge of adding those and ensuring // the total sum of flags is still valid. CORINFO_InstructionSetFlags instructionSetFlags = jitFlags.GetInstructionSetFlags(); opts.compSupportsISA = 0; opts.compSupportsISAReported = 0; opts.compSupportsISAExactly = 0; #if defined(TARGET_XARCH) instructionSetFlags.AddInstructionSet(InstructionSet_Vector128); instructionSetFlags.AddInstructionSet(InstructionSet_Vector256); #endif // TARGET_XARCH #if defined(TARGET_ARM64) instructionSetFlags.AddInstructionSet(InstructionSet_Vector64); instructionSetFlags.AddInstructionSet(InstructionSet_Vector128); #endif // TARGET_ARM64 instructionSetFlags = EnsureInstructionSetFlagsAreValid(instructionSetFlags); opts.setSupportedISAs(instructionSetFlags); #ifdef TARGET_XARCH if (!compIsForInlining()) { if (canUseVexEncoding()) { codeGen->GetEmitter()->SetUseVEXEncoding(true); // Assume each JITted method does not contain AVX instruction at first codeGen->GetEmitter()->SetContainsAVX(false); codeGen->GetEmitter()->SetContains256bitAVX(false); } } #endif // TARGET_XARCH } bool Compiler::notifyInstructionSetUsage(CORINFO_InstructionSet isa, bool supported) const { const char* isaString = InstructionSetToString(isa); JITDUMP("Notify VM instruction set (%s) %s be supported.\n", isaString, supported ? "must" : "must not"); return info.compCompHnd->notifyInstructionSetUsage(isa, supported); } #ifdef PROFILING_SUPPORTED // A Dummy routine to receive Enter/Leave/Tailcall profiler callbacks. // These are used when complus_JitEltHookEnabled=1 #ifdef TARGET_AMD64 void DummyProfilerELTStub(UINT_PTR ProfilerHandle, UINT_PTR callerSP) { return; } #else //! TARGET_AMD64 void DummyProfilerELTStub(UINT_PTR ProfilerHandle) { return; } #endif //! TARGET_AMD64 #endif // PROFILING_SUPPORTED bool Compiler::compShouldThrowOnNoway( #ifdef FEATURE_TRACELOGGING const char* filename, unsigned line #endif ) { #ifdef FEATURE_TRACELOGGING compJitTelemetry.NotifyNowayAssert(filename, line); #endif // In min opts, we don't want the noway assert to go through the exception // path. Instead we want it to just silently go through codegen for // compat reasons. return !opts.MinOpts(); } // ConfigInteger does not offer an option for decimal flags. Any numbers are interpreted as hex. // I could add the decimal option to ConfigInteger or I could write a function to reinterpret this // value as the user intended. unsigned ReinterpretHexAsDecimal(unsigned in) { // ex: in: 0x100 returns: 100 unsigned result = 0; unsigned index = 1; // default value if (in == INT_MAX) { return in; } while (in) { unsigned digit = in % 16; in >>= 4; assert(digit < 10); result += digit * index; index *= 10; } return result; } void Compiler::compInitOptions(JitFlags* jitFlags) { #ifdef UNIX_AMD64_ABI opts.compNeedToAlignFrame = false; #endif // UNIX_AMD64_ABI memset(&opts, 0, sizeof(opts)); if (compIsForInlining()) { // The following flags are lost when inlining. (They are removed in // Compiler::fgInvokeInlineeCompiler().) 
assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)); assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_PROF_ENTERLEAVE)); assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_EnC)); assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE)); assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_TRACK_TRANSITIONS)); } opts.jitFlags = jitFlags; opts.compFlags = CLFLG_MAXOPT; // Default value is for full optimization if (jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_CODE) || jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT) || jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0)) { opts.compFlags = CLFLG_MINOPT; } // Don't optimize .cctors (except prejit) or if we're an inlinee else if (!jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && ((info.compFlags & FLG_CCTOR) == FLG_CCTOR) && !compIsForInlining()) { opts.compFlags = CLFLG_MINOPT; } // Default value is to generate a blend of size and speed optimizations // opts.compCodeOpt = BLENDED_CODE; // If the EE sets SIZE_OPT or if we are compiling a Class constructor // we will optimize for code size at the expense of speed // if (jitFlags->IsSet(JitFlags::JIT_FLAG_SIZE_OPT) || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR)) { opts.compCodeOpt = SMALL_CODE; } // // If the EE sets SPEED_OPT we will optimize for speed at the expense of code size // else if (jitFlags->IsSet(JitFlags::JIT_FLAG_SPEED_OPT) || (jitFlags->IsSet(JitFlags::JIT_FLAG_TIER1) && !jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT))) { opts.compCodeOpt = FAST_CODE; assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_SIZE_OPT)); } //------------------------------------------------------------------------- opts.compDbgCode = jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_CODE); opts.compDbgInfo = jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_INFO); opts.compDbgEnC = jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_EnC); #ifdef DEBUG opts.compJitAlignLoopAdaptive = JitConfig.JitAlignLoopAdaptive() == 1; opts.compJitAlignLoopBoundary = (unsigned short)JitConfig.JitAlignLoopBoundary(); opts.compJitAlignLoopMinBlockWeight = (unsigned short)JitConfig.JitAlignLoopMinBlockWeight(); opts.compJitAlignLoopForJcc = JitConfig.JitAlignLoopForJcc() == 1; opts.compJitAlignLoopMaxCodeSize = (unsigned short)JitConfig.JitAlignLoopMaxCodeSize(); opts.compJitHideAlignBehindJmp = JitConfig.JitHideAlignBehindJmp() == 1; opts.compJitOptimizeStructHiddenBuffer = JitConfig.JitOptimizeStructHiddenBuffer() == 1; #else opts.compJitAlignLoopAdaptive = true; opts.compJitAlignLoopBoundary = DEFAULT_ALIGN_LOOP_BOUNDARY; opts.compJitAlignLoopMinBlockWeight = DEFAULT_ALIGN_LOOP_MIN_BLOCK_WEIGHT; opts.compJitAlignLoopMaxCodeSize = DEFAULT_MAX_LOOPSIZE_FOR_ALIGN; opts.compJitHideAlignBehindJmp = true; opts.compJitOptimizeStructHiddenBuffer = true; #endif #ifdef TARGET_XARCH if (opts.compJitAlignLoopAdaptive) { // For adaptive alignment, padding limit is equal to the max instruction encoding // size which is 15 bytes. Hence (32 >> 1) - 1 = 15 bytes. opts.compJitAlignPaddingLimit = (opts.compJitAlignLoopBoundary >> 1) - 1; } else { // For non-adaptive alignment, padding limit is 1 less than the alignment boundary // specified. opts.compJitAlignPaddingLimit = opts.compJitAlignLoopBoundary - 1; } #elif TARGET_ARM64 if (opts.compJitAlignLoopAdaptive) { // For adaptive alignment, padding limit is same as specified by the alignment // boundary because all instructions are 4 bytes long. Hence (32 >> 1) = 16 bytes. opts.compJitAlignPaddingLimit = (opts.compJitAlignLoopBoundary >> 1); } else { // For non-adaptive, padding limit is same as specified by the alignment. 
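// For example, with the default 32-byte boundary the non-adaptive limit works out to the full 32 bytes of padding.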
opts.compJitAlignPaddingLimit = opts.compJitAlignLoopBoundary; } #endif assert(isPow2(opts.compJitAlignLoopBoundary)); #ifdef TARGET_ARM64 // The minimum encoding size for Arm64 is 4 bytes. assert(opts.compJitAlignLoopBoundary >= 4); #endif #if REGEN_SHORTCUTS || REGEN_CALLPAT // We never want to have debugging enabled when regenerating GC encoding patterns opts.compDbgCode = false; opts.compDbgInfo = false; opts.compDbgEnC = false; #endif compSetProcessor(); #ifdef DEBUG opts.dspOrder = false; // Optionally suppress inliner compiler instance dumping. // if (compIsForInlining()) { if (JitConfig.JitDumpInlinePhases() > 0) { verbose = impInlineInfo->InlinerCompiler->verbose; } else { verbose = false; } } else { verbose = false; codeGen->setVerbose(false); } verboseTrees = verbose && shouldUseVerboseTrees(); verboseSsa = verbose && shouldUseVerboseSsa(); asciiTrees = shouldDumpASCIITrees(); opts.dspDiffable = compIsForInlining() ? impInlineInfo->InlinerCompiler->opts.dspDiffable : false; #endif opts.altJit = false; #if defined(LATE_DISASM) && !defined(DEBUG) // For non-debug builds with the late disassembler built in, we currently always do late disassembly // (we have no way to determine when not to, since we don't have class/method names). // In the DEBUG case, this is initialized to false, below. opts.doLateDisasm = true; #endif #ifdef DEBUG const JitConfigValues::MethodSet* pfAltJit; if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { pfAltJit = &JitConfig.AltJitNgen(); } else { pfAltJit = &JitConfig.AltJit(); } if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALT_JIT)) { if (pfAltJit->contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.altJit = true; } unsigned altJitLimit = ReinterpretHexAsDecimal(JitConfig.AltJitLimit()); if (altJitLimit > 0 && Compiler::jitTotalMethodCompiled >= altJitLimit) { opts.altJit = false; } } #else // !DEBUG const char* altJitVal; if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { altJitVal = JitConfig.AltJitNgen().list(); } else { altJitVal = JitConfig.AltJit().list(); } if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALT_JIT)) { // In release mode, you either get all methods or no methods. You must use "*" as the parameter, or we ignore // it. You don't get to give a regular expression of methods to match. // (Partially, this is because we haven't computed and stored the method and class name except in debug, and it // might be expensive to do so.) if ((altJitVal != nullptr) && (strcmp(altJitVal, "*") == 0)) { opts.altJit = true; } } #endif // !DEBUG // Take care of COMPlus_AltJitExcludeAssemblies. if (opts.altJit) { // First, initialize the AltJitExcludeAssemblies list, but only do it once. if (!s_pAltJitExcludeAssembliesListInitialized) { const WCHAR* wszAltJitExcludeAssemblyList = JitConfig.AltJitExcludeAssemblies(); if (wszAltJitExcludeAssemblyList != nullptr) { // NOTE: The Assembly name list is allocated in the process heap, not in the no-release heap, which is // reclaimed // for every compilation. This is ok because we only allocate once, due to the static. s_pAltJitExcludeAssembliesList = new (HostAllocator::getHostAllocator()) AssemblyNamesList2(wszAltJitExcludeAssemblyList, HostAllocator::getHostAllocator()); } s_pAltJitExcludeAssembliesListInitialized = true; } if (s_pAltJitExcludeAssembliesList != nullptr) { // We have an exclusion list. See if this method is in an assembly that is on the list. 
// Note that we check this for every method, since we might inline across modules, and // if the inlinee module is on the list, we don't want to use the altjit for it. const char* methodAssemblyName = info.compCompHnd->getAssemblyName( info.compCompHnd->getModuleAssembly(info.compCompHnd->getClassModule(info.compClassHnd))); if (s_pAltJitExcludeAssembliesList->IsInList(methodAssemblyName)) { opts.altJit = false; } } } #ifdef DEBUG bool altJitConfig = !pfAltJit->isEmpty(); // If we have a non-empty AltJit config then we change all of these other // config values to refer only to the AltJit. Otherwise, a lot of COMPlus_* variables // would apply to both the altjit and the normal JIT, but we only care about // debugging the altjit if the COMPlus_AltJit configuration is set. // if (compIsForImportOnly() && (!altJitConfig || opts.altJit)) { if (JitConfig.JitImportBreak().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { assert(!"JitImportBreak reached"); } } bool verboseDump = false; if (!altJitConfig || opts.altJit) { // We should only enable 'verboseDump' when we are actually compiling a matching method // and not enable it when we are just considering inlining a matching method. // if (!compIsForInlining()) { if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { if (JitConfig.NgenDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { verboseDump = true; } unsigned ngenHashDumpVal = (unsigned)JitConfig.NgenHashDump(); if ((ngenHashDumpVal != (DWORD)-1) && (ngenHashDumpVal == info.compMethodHash())) { verboseDump = true; } } else { if (JitConfig.JitDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { verboseDump = true; } unsigned jitHashDumpVal = (unsigned)JitConfig.JitHashDump(); if ((jitHashDumpVal != (DWORD)-1) && (jitHashDumpVal == info.compMethodHash())) { verboseDump = true; } } } } // Optionally suppress dumping Tier0 jit requests. // if (verboseDump && jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0)) { verboseDump = (JitConfig.JitDumpTier0() > 0); } // Optionally suppress dumping except for a specific OSR jit request. // const int dumpAtOSROffset = JitConfig.JitDumpAtOSROffset(); if (verboseDump && (dumpAtOSROffset != -1)) { if (jitFlags->IsSet(JitFlags::JIT_FLAG_OSR)) { verboseDump = (((IL_OFFSET)dumpAtOSROffset) == info.compILEntry); } else { verboseDump = false; } } if (verboseDump) { verbose = true; } #endif // DEBUG #ifdef FEATURE_SIMD setUsesSIMDTypes(false); #endif // FEATURE_SIMD lvaEnregEHVars = (compEnregLocals() && JitConfig.EnableEHWriteThru()); lvaEnregMultiRegVars = (compEnregLocals() && JitConfig.EnableMultiRegLocals()); if (compIsForImportOnly()) { return; } #if FEATURE_TAILCALL_OPT // By default opportunistic tail call optimization is enabled. // Recognition is done in the importer so this must be set for // inlinees as well. opts.compTailCallOpt = true; #endif // FEATURE_TAILCALL_OPT #if FEATURE_FASTTAILCALL // By default fast tail calls are enabled. 
opts.compFastTailCalls = true; #endif // FEATURE_FASTTAILCALL // Profile data // fgPgoSchema = nullptr; fgPgoData = nullptr; fgPgoSchemaCount = 0; fgPgoQueryResult = E_FAIL; fgPgoFailReason = nullptr; fgPgoSource = ICorJitInfo::PgoSource::Unknown; if (jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT)) { fgPgoQueryResult = info.compCompHnd->getPgoInstrumentationResults(info.compMethodHnd, &fgPgoSchema, &fgPgoSchemaCount, &fgPgoData, &fgPgoSource); // a failed result that also has a non-NULL fgPgoSchema // indicates that the ILSize for the method no longer matches // the ILSize for the method when profile data was collected. // // We will discard the IBC data in this case // if (FAILED(fgPgoQueryResult)) { fgPgoFailReason = (fgPgoSchema != nullptr) ? "No matching PGO data" : "No PGO data"; fgPgoData = nullptr; fgPgoSchema = nullptr; } // Optionally, disable use of profile data. // else if (JitConfig.JitDisablePgo() > 0) { fgPgoFailReason = "PGO data available, but JitDisablePgo > 0"; fgPgoQueryResult = E_FAIL; fgPgoData = nullptr; fgPgoSchema = nullptr; fgPgoDisabled = true; } #ifdef DEBUG // Optionally, enable use of profile data for only some methods. // else { static ConfigMethodRange JitEnablePgoRange; JitEnablePgoRange.EnsureInit(JitConfig.JitEnablePgoRange()); // Base this decision on the root method hash, so a method either sees all available // profile data (including that for inlinees), or none of it. // const unsigned hash = impInlineRoot()->info.compMethodHash(); if (!JitEnablePgoRange.Contains(hash)) { fgPgoFailReason = "PGO data available, but method hash NOT within JitEnablePgoRange"; fgPgoQueryResult = E_FAIL; fgPgoData = nullptr; fgPgoSchema = nullptr; fgPgoDisabled = true; } } // A successful result implies a non-NULL fgPgoSchema // if (SUCCEEDED(fgPgoQueryResult)) { assert(fgPgoSchema != nullptr); } // A failed result implies a NULL fgPgoSchema // see implementation of Compiler::fgHaveProfileData() // if (FAILED(fgPgoQueryResult)) { assert(fgPgoSchema == nullptr); } #endif } if (compIsForInlining()) { return; } // The rest of the opts fields that we initialize here // should only be used when we generate code for the method // They should not be used when importing or inlining CLANG_FORMAT_COMMENT_ANCHOR; #if FEATURE_TAILCALL_OPT opts.compTailCallLoopOpt = true; #endif // FEATURE_TAILCALL_OPT opts.genFPorder = true; opts.genFPopt = true; opts.instrCount = 0; opts.lvRefCount = 0; #ifdef PROFILING_SUPPORTED opts.compJitELTHookEnabled = false; #endif // PROFILING_SUPPORTED #if defined(TARGET_ARM64) // 0 is default: use the appropriate frame type based on the function. opts.compJitSaveFpLrWithCalleeSavedRegisters = 0; #endif // defined(TARGET_ARM64) #ifdef DEBUG opts.dspInstrs = false; opts.dspLines = false; opts.varNames = false; opts.dmpHex = false; opts.disAsm = false; opts.disAsmSpilled = false; opts.disDiffable = false; opts.disAddr = false; opts.disAlignment = false; opts.dspCode = false; opts.dspEHTable = false; opts.dspDebugInfo = false; opts.dspGCtbls = false; opts.disAsm2 = false; opts.dspUnwind = false; opts.compLongAddress = false; opts.optRepeat = false; #ifdef LATE_DISASM opts.doLateDisasm = false; #endif // LATE_DISASM compDebugBreak = false; // If we have a non-empty AltJit config then we change all of these other // config values to refer only to the AltJit. 
// if (!altJitConfig || opts.altJit) { if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { if ((JitConfig.NgenOrder() & 1) == 1) { opts.dspOrder = true; } if (JitConfig.NgenGCDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspGCtbls = true; } if (JitConfig.NgenDisasm().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.disAsm = true; } if (JitConfig.NgenDisasm().contains("SPILLED", nullptr, nullptr)) { opts.disAsmSpilled = true; } if (JitConfig.NgenUnwindDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspUnwind = true; } if (JitConfig.NgenEHDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspEHTable = true; } if (JitConfig.NgenDebugDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspDebugInfo = true; } } else { bool disEnabled = true; // Setup assembly name list for disassembly, if not already set up. if (!s_pJitDisasmIncludeAssembliesListInitialized) { const WCHAR* assemblyNameList = JitConfig.JitDisasmAssemblies(); if (assemblyNameList != nullptr) { s_pJitDisasmIncludeAssembliesList = new (HostAllocator::getHostAllocator()) AssemblyNamesList2(assemblyNameList, HostAllocator::getHostAllocator()); } s_pJitDisasmIncludeAssembliesListInitialized = true; } // If we have an assembly name list for disassembly, also check this method's assembly. if (s_pJitDisasmIncludeAssembliesList != nullptr && !s_pJitDisasmIncludeAssembliesList->IsEmpty()) { const char* assemblyName = info.compCompHnd->getAssemblyName( info.compCompHnd->getModuleAssembly(info.compCompHnd->getClassModule(info.compClassHnd))); if (!s_pJitDisasmIncludeAssembliesList->IsInList(assemblyName)) { disEnabled = false; } } if (disEnabled) { if ((JitConfig.JitOrder() & 1) == 1) { opts.dspOrder = true; } if (JitConfig.JitGCDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspGCtbls = true; } if (JitConfig.JitDisasm().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.disAsm = true; } if (JitConfig.JitDisasm().contains("SPILLED", nullptr, nullptr)) { opts.disAsmSpilled = true; } if (JitConfig.JitUnwindDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspUnwind = true; } if (JitConfig.JitEHDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspEHTable = true; } if (JitConfig.JitDebugDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspDebugInfo = true; } } } if (opts.disAsm && JitConfig.JitDisasmWithGC()) { opts.disasmWithGC = true; } #ifdef LATE_DISASM if (JitConfig.JitLateDisasm().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) opts.doLateDisasm = true; #endif // LATE_DISASM // This one applies to both Ngen/Jit Disasm output: COMPlus_JitDiffableDasm=1 if (JitConfig.DiffableDasm() != 0) { opts.disDiffable = true; opts.dspDiffable = true; } // This one applies to both Ngen/Jit Disasm output: COMPlus_JitDasmWithAddress=1 if (JitConfig.JitDasmWithAddress() != 0) { opts.disAddr = true; } if (JitConfig.JitDasmWithAlignmentBoundaries() != 0) { opts.disAlignment = true; } if (JitConfig.JitLongAddress() != 0) { opts.compLongAddress = true; } if (JitConfig.JitOptRepeat().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.optRepeat = true; } } if (verboseDump) { opts.dspCode 
= true; opts.dspEHTable = true; opts.dspGCtbls = true; opts.disAsm2 = true; opts.dspUnwind = true; verbose = true; verboseTrees = shouldUseVerboseTrees(); verboseSsa = shouldUseVerboseSsa(); codeGen->setVerbose(true); } treesBeforeAfterMorph = (JitConfig.TreesBeforeAfterMorph() == 1); morphNum = 0; // Initialize the morphed-trees counting. expensiveDebugCheckLevel = JitConfig.JitExpensiveDebugCheckLevel(); if (expensiveDebugCheckLevel == 0) { // If we're in a stress mode that modifies the flowgraph, make 1 the default. if (fgStressBBProf() || compStressCompile(STRESS_DO_WHILE_LOOPS, 30)) { expensiveDebugCheckLevel = 1; } } if (verbose) { printf("****** START compiling %s (MethodHash=%08x)\n", info.compFullName, info.compMethodHash()); printf("Generating code for %s %s\n", Target::g_tgtPlatformName(), Target::g_tgtCPUName); printf(""); // in our logic this causes a flush } if (JitConfig.JitBreak().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { assert(!"JitBreak reached"); } unsigned jitHashBreakVal = (unsigned)JitConfig.JitHashBreak(); if ((jitHashBreakVal != (DWORD)-1) && (jitHashBreakVal == info.compMethodHash())) { assert(!"JitHashBreak reached"); } if (verbose || JitConfig.JitDebugBreak().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args) || JitConfig.JitBreak().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { compDebugBreak = true; } memset(compActiveStressModes, 0, sizeof(compActiveStressModes)); // Read function list, if not already read, and there exists such a list. if (!s_pJitFunctionFileInitialized) { const WCHAR* functionFileName = JitConfig.JitFunctionFile(); if (functionFileName != nullptr) { s_pJitMethodSet = new (HostAllocator::getHostAllocator()) MethodSet(functionFileName, HostAllocator::getHostAllocator()); } s_pJitFunctionFileInitialized = true; } #endif // DEBUG //------------------------------------------------------------------------- #ifdef DEBUG assert(!codeGen->isGCTypeFixed()); opts.compGcChecks = (JitConfig.JitGCChecks() != 0) || compStressCompile(STRESS_GENERIC_VARN, 5); #endif #if defined(DEBUG) && defined(TARGET_XARCH) enum { STACK_CHECK_ON_RETURN = 0x1, STACK_CHECK_ON_CALL = 0x2, STACK_CHECK_ALL = 0x3 }; DWORD dwJitStackChecks = JitConfig.JitStackChecks(); if (compStressCompile(STRESS_GENERIC_VARN, 5)) { dwJitStackChecks = STACK_CHECK_ALL; } opts.compStackCheckOnRet = (dwJitStackChecks & DWORD(STACK_CHECK_ON_RETURN)) != 0; #if defined(TARGET_X86) opts.compStackCheckOnCall = (dwJitStackChecks & DWORD(STACK_CHECK_ON_CALL)) != 0; #endif // defined(TARGET_X86) #endif // defined(DEBUG) && defined(TARGET_XARCH) #if MEASURE_MEM_ALLOC s_dspMemStats = (JitConfig.DisplayMemStats() != 0); #endif #ifdef PROFILING_SUPPORTED opts.compNoPInvokeInlineCB = jitFlags->IsSet(JitFlags::JIT_FLAG_PROF_NO_PINVOKE_INLINE); // Cache the profiler handle if (jitFlags->IsSet(JitFlags::JIT_FLAG_PROF_ENTERLEAVE)) { bool hookNeeded; bool indirected; info.compCompHnd->GetProfilingHandle(&hookNeeded, &compProfilerMethHnd, &indirected); compProfilerHookNeeded = !!hookNeeded; compProfilerMethHndIndirected = !!indirected; } else { compProfilerHookNeeded = false; compProfilerMethHnd = nullptr; compProfilerMethHndIndirected = false; } // Honour COMPlus_JitELTHookEnabled or STRESS_PROFILER_CALLBACKS stress mode // only if VM has not asked us to generate profiler hooks in the first place. // That is, override VM only if it hasn't asked for a profiler callback for this method. 
// Don't run this stress mode when pre-JITing, as we would need to emit a relocation // for the call to the fake ELT hook, which wouldn't make sense, as we can't store that // in the pre-JIT image. if (!compProfilerHookNeeded) { if ((JitConfig.JitELTHookEnabled() != 0) || (!jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && compStressCompile(STRESS_PROFILER_CALLBACKS, 5))) { opts.compJitELTHookEnabled = true; } } // TBD: Exclude PInvoke stubs if (opts.compJitELTHookEnabled) { compProfilerMethHnd = (void*)DummyProfilerELTStub; compProfilerMethHndIndirected = false; } #endif // PROFILING_SUPPORTED #if FEATURE_TAILCALL_OPT const WCHAR* strTailCallOpt = JitConfig.TailCallOpt(); if (strTailCallOpt != nullptr) { opts.compTailCallOpt = (UINT)_wtoi(strTailCallOpt) != 0; } if (JitConfig.TailCallLoopOpt() == 0) { opts.compTailCallLoopOpt = false; } #endif #if FEATURE_FASTTAILCALL if (JitConfig.FastTailCalls() == 0) { opts.compFastTailCalls = false; } #endif // FEATURE_FASTTAILCALL #ifdef CONFIGURABLE_ARM_ABI opts.compUseSoftFP = jitFlags->IsSet(JitFlags::JIT_FLAG_SOFTFP_ABI); unsigned int softFPConfig = opts.compUseSoftFP ? 2 : 1; unsigned int oldSoftFPConfig = InterlockedCompareExchange(&GlobalJitOptions::compUseSoftFPConfigured, softFPConfig, 0); if (oldSoftFPConfig != softFPConfig && oldSoftFPConfig != 0) { // There are no current scenarios where the abi can change during the lifetime of a process // that uses the JIT. If such a change occurs, either compFeatureHfa will need to change to a TLS static // or we will need to have some means to reset the flag safely. NO_WAY("SoftFP ABI setting changed during lifetime of process"); } GlobalJitOptions::compFeatureHfa = !opts.compUseSoftFP; #elif defined(ARM_SOFTFP) && defined(TARGET_ARM) // Armel is unconditionally enabled in the JIT. Verify that the VM side agrees. assert(jitFlags->IsSet(JitFlags::JIT_FLAG_SOFTFP_ABI)); #elif defined(TARGET_ARM) assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_SOFTFP_ABI)); #endif // CONFIGURABLE_ARM_ABI opts.compScopeInfo = opts.compDbgInfo; #ifdef LATE_DISASM codeGen->getDisAssembler().disOpenForLateDisAsm(info.compMethodName, info.compClassName, info.compMethodInfo->args.pSig); #endif //------------------------------------------------------------------------- opts.compReloc = jitFlags->IsSet(JitFlags::JIT_FLAG_RELOC); #ifdef DEBUG #if defined(TARGET_XARCH) // Whether encoding of absolute addr as PC-rel offset is enabled opts.compEnablePCRelAddr = (JitConfig.EnablePCRelAddr() != 0); #endif #endif // DEBUG opts.compProcedureSplitting = jitFlags->IsSet(JitFlags::JIT_FLAG_PROCSPLIT); #ifdef TARGET_ARM64 // TODO-ARM64-NYI: enable hot/cold splitting opts.compProcedureSplitting = false; #endif // TARGET_ARM64 #ifdef DEBUG opts.compProcedureSplittingEH = opts.compProcedureSplitting; #endif // DEBUG if (opts.compProcedureSplitting) { // Note that opts.compdbgCode is true under ngen for checked assemblies! opts.compProcedureSplitting = !opts.compDbgCode; #ifdef DEBUG // JitForceProcedureSplitting is used to force procedure splitting on checked assemblies. // This is useful for debugging on a checked build. Note that we still only do procedure // splitting in the zapper. if (JitConfig.JitForceProcedureSplitting().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.compProcedureSplitting = true; } // JitNoProcedureSplitting will always disable procedure splitting. 
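// (For example, setting COMPlus_JitNoProcedureSplitting=* should turn splitting off for every method, since '*' matches all methods in these method-set configs.)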
if (JitConfig.JitNoProcedureSplitting().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.compProcedureSplitting = false; } // // JitNoProcedureSplittingEH will disable procedure splitting in functions with EH. if (JitConfig.JitNoProcedureSplittingEH().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.compProcedureSplittingEH = false; } #endif } #ifdef DEBUG // Now, set compMaxUncheckedOffsetForNullObject for STRESS_NULL_OBJECT_CHECK if (compStressCompile(STRESS_NULL_OBJECT_CHECK, 30)) { compMaxUncheckedOffsetForNullObject = (size_t)JitConfig.JitMaxUncheckedOffset(); if (verbose) { printf("STRESS_NULL_OBJECT_CHECK: compMaxUncheckedOffsetForNullObject=0x%X\n", compMaxUncheckedOffsetForNullObject); } } if (verbose) { // If we are compiling for a specific tier, make that very obvious in the output. // Note that we don't expect multiple TIER flags to be set at one time, but there // is nothing preventing that. if (jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0)) { printf("OPTIONS: Tier-0 compilation (set COMPlus_TieredCompilation=0 to disable)\n"); } if (jitFlags->IsSet(JitFlags::JIT_FLAG_TIER1)) { printf("OPTIONS: Tier-1 compilation\n"); } if (compSwitchedToOptimized) { printf("OPTIONS: Tier-0 compilation, switched to FullOpts\n"); } if (compSwitchedToMinOpts) { printf("OPTIONS: Tier-1/FullOpts compilation, switched to MinOpts\n"); } if (jitFlags->IsSet(JitFlags::JIT_FLAG_OSR)) { printf("OPTIONS: OSR variant with entry point 0x%x\n", info.compILEntry); } printf("OPTIONS: compCodeOpt = %s\n", (opts.compCodeOpt == BLENDED_CODE) ? "BLENDED_CODE" : (opts.compCodeOpt == SMALL_CODE) ? "SMALL_CODE" : (opts.compCodeOpt == FAST_CODE) ? "FAST_CODE" : "UNKNOWN_CODE"); printf("OPTIONS: compDbgCode = %s\n", dspBool(opts.compDbgCode)); printf("OPTIONS: compDbgInfo = %s\n", dspBool(opts.compDbgInfo)); printf("OPTIONS: compDbgEnC = %s\n", dspBool(opts.compDbgEnC)); printf("OPTIONS: compProcedureSplitting = %s\n", dspBool(opts.compProcedureSplitting)); printf("OPTIONS: compProcedureSplittingEH = %s\n", dspBool(opts.compProcedureSplittingEH)); if (jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT) && fgHaveProfileData()) { printf("OPTIONS: optimized using %s profile data\n", pgoSourceToString(fgPgoSource)); } if (fgPgoFailReason != nullptr) { printf("OPTIONS: %s\n", fgPgoFailReason); } if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { printf("OPTIONS: Jit invoked for ngen\n"); } } #endif #ifdef PROFILING_SUPPORTED #ifdef UNIX_AMD64_ABI if (compIsProfilerHookNeeded()) { opts.compNeedToAlignFrame = true; } #endif // UNIX_AMD64_ABI #endif #if defined(DEBUG) && defined(TARGET_ARM64) if ((s_pJitMethodSet == nullptr) || s_pJitMethodSet->IsActiveMethod(info.compFullName, info.compMethodHash())) { opts.compJitSaveFpLrWithCalleeSavedRegisters = JitConfig.JitSaveFpLrWithCalleeSavedRegisters(); } #endif // defined(DEBUG) && defined(TARGET_ARM64) } #ifdef DEBUG bool Compiler::compJitHaltMethod() { /* This method returns true when we use an INS_BREAKPOINT to allow us to step into the generated native code */ /* Note that this these two "Jit" environment variables also work for ngen images */ if (JitConfig.JitHalt().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { return true; } /* Use this Hash variant when there are a lot of method with the same name and different signatures */ unsigned fJitHashHaltVal = (unsigned)JitConfig.JitHashHalt(); if ((fJitHashHaltVal != (unsigned)-1) && (fJitHashHaltVal == info.compMethodHash())) { return 
true; } return false; } /***************************************************************************** * Should we use a "stress-mode" for the given stressArea. We have different * areas to allow the areas to be mixed in different combinations in * different methods. * 'weight' indicates how often (as a percentage) the area should be stressed. * It should reflect the usefulness:overhead ratio. */ const LPCWSTR Compiler::s_compStressModeNames[STRESS_COUNT + 1] = { #define STRESS_MODE(mode) W("STRESS_") W(#mode), STRESS_MODES #undef STRESS_MODE }; //------------------------------------------------------------------------ // compStressCompile: determine if a stress mode should be enabled // // Arguments: // stressArea - stress mode to possibly enable // weight - percent of time this mode should be turned on // (range 0 to 100); weight 0 effectively disables // // Returns: // true if this stress mode is enabled // // Notes: // Methods may be excluded from stress via name or hash. // // Particular stress modes may be disabled or forcibly enabled. // // With JitStress=2, some stress modes are enabled regardless of weight; // these modes are the ones after COUNT_VARN in the enumeration. // // For other modes or for nonzero JitStress values, stress will be // enabled selectively for roughly weight% of methods. // bool Compiler::compStressCompile(compStressArea stressArea, unsigned weight) { // This can be called early, before info is fully set up. if ((info.compMethodName == nullptr) || (info.compFullName == nullptr)) { return false; } // Inlinees defer to the root method for stress, so that we can // more easily isolate methods that cause stress failures. if (compIsForInlining()) { return impInlineRoot()->compStressCompile(stressArea, weight); } const bool doStress = compStressCompileHelper(stressArea, weight); if (doStress && !compActiveStressModes[stressArea]) { if (verbose) { printf("\n\n*** JitStress: %ws ***\n\n", s_compStressModeNames[stressArea]); } compActiveStressModes[stressArea] = 1; } return doStress; } //------------------------------------------------------------------------ // compStressCompileHelper: helper to determine if a stress mode should be enabled // // Arguments: // stressArea - stress mode to possibly enable // weight - percent of time this mode should be turned on // (range 0 to 100); weight 0 effectively disables // // Returns: // true if this stress mode is enabled // // Notes: // See compStressCompile // bool Compiler::compStressCompileHelper(compStressArea stressArea, unsigned weight) { if (!bRangeAllowStress) { return false; } if (!JitConfig.JitStressOnly().isEmpty() && !JitConfig.JitStressOnly().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { return false; } // Does user explicitly prevent using this STRESS_MODE through the command line? const WCHAR* strStressModeNamesNot = JitConfig.JitStressModeNamesNot(); if ((strStressModeNamesNot != nullptr) && (wcsstr(strStressModeNamesNot, s_compStressModeNames[stressArea]) != nullptr)) { return false; } // Does user explicitly set this STRESS_MODE through the command line? const WCHAR* strStressModeNames = JitConfig.JitStressModeNames(); if (strStressModeNames != nullptr) { if (wcsstr(strStressModeNames, s_compStressModeNames[stressArea]) != nullptr) { return true; } // This stress mode name did not match anything in the stress // mode allowlist. If user has requested only enable mode, // don't allow this stress mode to turn on. 
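// For example, running with COMPlus_JitStressModeNames=STRESS_NULL_OBJECT_CHECK and COMPlus_JitStressModeNamesOnly=1 should let only that one mode fire; with JitStressModeNamesOnly left at 0, unlisted modes still fall through to the weighted hash check below.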
const bool onlyEnableMode = JitConfig.JitStressModeNamesOnly() != 0; if (onlyEnableMode) { return false; } } // 0: No stress (Except when explicitly set in complus_JitStressModeNames) // !=2: Vary stress. Performance will be slightly/moderately degraded // 2: Check-all stress. Performance will be REALLY horrible const int stressLevel = getJitStressLevel(); assert(weight <= MAX_STRESS_WEIGHT); // Check for boundary conditions if (stressLevel == 0 || weight == 0) { return false; } // Should we allow unlimited stress ? if ((stressArea > STRESS_COUNT_VARN) && (stressLevel == 2)) { return true; } if (weight == MAX_STRESS_WEIGHT) { return true; } // Get a hash which can be compared with 'weight' assert(stressArea != 0); const unsigned hash = (info.compMethodHash() ^ stressArea ^ stressLevel) % MAX_STRESS_WEIGHT; assert(hash < MAX_STRESS_WEIGHT && weight <= MAX_STRESS_WEIGHT); return (hash < weight); } //------------------------------------------------------------------------ // compPromoteFewerStructs: helper to determine if the local // should not be promoted under a stress mode. // // Arguments: // lclNum - local number to test // // Returns: // true if this local should not be promoted. // // Notes: // Reject ~50% of the potential promotions if STRESS_PROMOTE_FEWER_STRUCTS is active. // bool Compiler::compPromoteFewerStructs(unsigned lclNum) { bool rejectThisPromo = false; const bool promoteLess = compStressCompile(STRESS_PROMOTE_FEWER_STRUCTS, 50); if (promoteLess) { rejectThisPromo = (((info.compMethodHash() ^ lclNum) & 1) == 0); } return rejectThisPromo; } #endif // DEBUG void Compiler::compInitDebuggingInfo() { #ifdef DEBUG if (verbose) { printf("*************** In compInitDebuggingInfo() for %s\n", info.compFullName); } #endif /*------------------------------------------------------------------------- * * Get hold of the local variable records, if there are any */ info.compVarScopesCount = 0; if (opts.compScopeInfo) { eeGetVars(); } compInitVarScopeMap(); if (opts.compScopeInfo || opts.compDbgCode) { compInitScopeLists(); } if (opts.compDbgCode && (info.compVarScopesCount > 0)) { /* Create a new empty basic block. fgExtendDbgLifetimes() may add initialization of variables which are in scope right from the start of the (real) first BB (and therefore artificially marked as alive) into this block. */ fgEnsureFirstBBisScratch(); fgNewStmtAtEnd(fgFirstBB, gtNewNothingNode()); JITDUMP("Debuggable code - Add new %s to perform initialization of variables\n", fgFirstBB->dspToString()); } /*------------------------------------------------------------------------- * * Read the stmt-offsets table and the line-number table */ info.compStmtOffsetsImplicit = ICorDebugInfo::NO_BOUNDARIES; // We can only report debug info for EnC at places where the stack is empty. // Actually, at places where there are not live temps. Else, we won't be able // to map between the old and the new versions correctly as we won't have // any info for the live temps. 
assert(!opts.compDbgEnC || !opts.compDbgInfo || 0 == (info.compStmtOffsetsImplicit & ~ICorDebugInfo::STACK_EMPTY_BOUNDARIES)); info.compStmtOffsetsCount = 0; if (opts.compDbgInfo) { /* Get hold of the line# records, if there are any */ eeGetStmtOffsets(); #ifdef DEBUG if (verbose) { printf("info.compStmtOffsetsCount = %d\n", info.compStmtOffsetsCount); printf("info.compStmtOffsetsImplicit = %04Xh", info.compStmtOffsetsImplicit); if (info.compStmtOffsetsImplicit) { printf(" ( "); if (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) { printf("STACK_EMPTY "); } if (info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) { printf("NOP "); } if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) { printf("CALL_SITE "); } printf(")"); } printf("\n"); IL_OFFSET* pOffs = info.compStmtOffsets; for (unsigned i = 0; i < info.compStmtOffsetsCount; i++, pOffs++) { printf("%02d) IL_%04Xh\n", i, *pOffs); } } #endif } } void Compiler::compSetOptimizationLevel() { bool theMinOptsValue; #pragma warning(suppress : 4101) unsigned jitMinOpts; if (compIsForInlining()) { theMinOptsValue = impInlineInfo->InlinerCompiler->opts.MinOpts(); goto _SetMinOpts; } theMinOptsValue = false; if (opts.compFlags == CLFLG_MINOPT) { JITLOG((LL_INFO100, "CLFLG_MINOPT set for method %s\n", info.compFullName)); theMinOptsValue = true; } #ifdef DEBUG jitMinOpts = JitConfig.JitMinOpts(); if (!theMinOptsValue && (jitMinOpts > 0)) { // jitTotalMethodCompiled does not include the method that is being compiled now, so make +1. unsigned methodCount = Compiler::jitTotalMethodCompiled + 1; unsigned methodCountMask = methodCount & 0xFFF; unsigned kind = (jitMinOpts & 0xF000000) >> 24; switch (kind) { default: if (jitMinOpts <= methodCount) { if (verbose) { printf(" Optimizations disabled by JitMinOpts and methodCount\n"); } theMinOptsValue = true; } break; case 0xD: { unsigned firstMinopts = (jitMinOpts >> 12) & 0xFFF; unsigned secondMinopts = (jitMinOpts >> 0) & 0xFFF; if ((firstMinopts == methodCountMask) || (secondMinopts == methodCountMask)) { if (verbose) { printf("0xD: Optimizations disabled by JitMinOpts and methodCountMask\n"); } theMinOptsValue = true; } } break; case 0xE: { unsigned startMinopts = (jitMinOpts >> 12) & 0xFFF; unsigned endMinopts = (jitMinOpts >> 0) & 0xFFF; if ((startMinopts <= methodCountMask) && (endMinopts >= methodCountMask)) { if (verbose) { printf("0xE: Optimizations disabled by JitMinOpts and methodCountMask\n"); } theMinOptsValue = true; } } break; case 0xF: { unsigned bitsZero = (jitMinOpts >> 12) & 0xFFF; unsigned bitsOne = (jitMinOpts >> 0) & 0xFFF; if (((methodCountMask & bitsOne) == bitsOne) && ((~methodCountMask & bitsZero) == bitsZero)) { if (verbose) { printf("0xF: Optimizations disabled by JitMinOpts and methodCountMask\n"); } theMinOptsValue = true; } } break; } } if (!theMinOptsValue) { if (JitConfig.JitMinOptsName().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { theMinOptsValue = true; } } #if 0 // The code in this #if can be used to debug optimization issues according to method hash. // To use, uncomment, rebuild and set environment variables minoptshashlo and minoptshashhi. 
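// For example, minoptshashlo=0 and minoptshashhi=ffffffff would force MinOpts for every method; tightening the range is a quick way to bisect down to a single troublesome method hash.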
#ifdef DEBUG unsigned methHash = info.compMethodHash(); char* lostr = getenv("minoptshashlo"); unsigned methHashLo = 0; if (lostr != nullptr) { sscanf_s(lostr, "%x", &methHashLo); char* histr = getenv("minoptshashhi"); unsigned methHashHi = UINT32_MAX; if (histr != nullptr) { sscanf_s(histr, "%x", &methHashHi); if (methHash >= methHashLo && methHash <= methHashHi) { printf("MinOpts for method %s, hash = %08x.\n", info.compFullName, methHash); printf(""); // in our logic this causes a flush theMinOptsValue = true; } } } #endif #endif if (compStressCompile(STRESS_MIN_OPTS, 5)) { theMinOptsValue = true; } // For PREJIT we never drop down to MinOpts // unless CLFLG_MINOPT is set else if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { if ((unsigned)JitConfig.JitMinOptsCodeSize() < info.compILCodeSize) { JITLOG((LL_INFO10, "IL Code Size exceeded, using MinOpts for method %s\n", info.compFullName)); theMinOptsValue = true; } else if ((unsigned)JitConfig.JitMinOptsInstrCount() < opts.instrCount) { JITLOG((LL_INFO10, "IL instruction count exceeded, using MinOpts for method %s\n", info.compFullName)); theMinOptsValue = true; } else if ((unsigned)JitConfig.JitMinOptsBbCount() < fgBBcount) { JITLOG((LL_INFO10, "Basic Block count exceeded, using MinOpts for method %s\n", info.compFullName)); theMinOptsValue = true; } else if ((unsigned)JitConfig.JitMinOptsLvNumCount() < lvaCount) { JITLOG((LL_INFO10, "Local Variable Num count exceeded, using MinOpts for method %s\n", info.compFullName)); theMinOptsValue = true; } else if ((unsigned)JitConfig.JitMinOptsLvRefCount() < opts.lvRefCount) { JITLOG((LL_INFO10, "Local Variable Ref count exceeded, using MinOpts for method %s\n", info.compFullName)); theMinOptsValue = true; } if (theMinOptsValue == true) { JITLOG((LL_INFO10000, "IL Code Size,Instr %4d,%4d, Basic Block count %3d, Local Variable Num,Ref count " "%3d,%3d for method %s\n", info.compILCodeSize, opts.instrCount, fgBBcount, lvaCount, opts.lvRefCount, info.compFullName)); if (JitConfig.JitBreakOnMinOpts() != 0) { assert(!"MinOpts enabled"); } } } #else // !DEBUG // Retail check if we should force MinOpts due to the complexity of the method // For PREJIT we never drop down to MinOpts // unless CLFLG_MINOPT is set if (!theMinOptsValue && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && ((DEFAULT_MIN_OPTS_CODE_SIZE < info.compILCodeSize) || (DEFAULT_MIN_OPTS_INSTR_COUNT < opts.instrCount) || (DEFAULT_MIN_OPTS_BB_COUNT < fgBBcount) || (DEFAULT_MIN_OPTS_LV_NUM_COUNT < lvaCount) || (DEFAULT_MIN_OPTS_LV_REF_COUNT < opts.lvRefCount))) { theMinOptsValue = true; } #endif // DEBUG JITLOG((LL_INFO10000, "IL Code Size,Instr %4d,%4d, Basic Block count %3d, Local Variable Num,Ref count %3d,%3d for method %s\n", info.compILCodeSize, opts.instrCount, fgBBcount, lvaCount, opts.lvRefCount, info.compFullName)); #if 0 // The code in this #if has been useful in debugging loop cloning issues, by // enabling the loop cloning optimization selectively according to // method hash. #ifdef DEBUG if (!theMinOptsValue) { unsigned methHash = info.compMethodHash(); char* lostr = getenv("opthashlo"); unsigned methHashLo = 0; if (lostr != NULL) { sscanf_s(lostr, "%x", &methHashLo); // methHashLo = (unsigned(atoi(lostr)) << 2); // So we don't have to use negative numbers. } char* histr = getenv("opthashhi"); unsigned methHashHi = UINT32_MAX; if (histr != NULL) { sscanf_s(histr, "%x", &methHashHi); // methHashHi = (unsigned(atoi(histr)) << 2); // So we don't have to use negative numbers. 
} if (methHash < methHashLo || methHash > methHashHi) { theMinOptsValue = true; } else { printf("Doing optimization in %s (0x%x).\n", info.compFullName, methHash); } } #endif #endif _SetMinOpts: // Set the MinOpts value opts.SetMinOpts(theMinOptsValue); // Notify the VM if MinOpts is being used when not requested if (theMinOptsValue && !compIsForInlining() && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT) && !opts.compDbgCode) { info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_SWITCHED_TO_MIN_OPT); opts.jitFlags->Clear(JitFlags::JIT_FLAG_TIER1); compSwitchedToMinOpts = true; } #ifdef DEBUG if (verbose && !compIsForInlining()) { printf("OPTIONS: opts.MinOpts() == %s\n", opts.MinOpts() ? "true" : "false"); } #endif /* Control the optimizations */ if (opts.OptimizationDisabled()) { opts.compFlags &= ~CLFLG_MAXOPT; opts.compFlags |= CLFLG_MINOPT; } if (!compIsForInlining()) { codeGen->setFramePointerRequired(false); codeGen->setFrameRequired(false); if (opts.OptimizationDisabled()) { codeGen->setFrameRequired(true); } #if !defined(TARGET_AMD64) // The VM sets JitFlags::JIT_FLAG_FRAMED for two reasons: (1) the COMPlus_JitFramed variable is set, or // (2) the function is marked "noinline". The reason for #2 is that people mark functions // noinline to ensure they show up in a stack walk. But for AMD64, we don't need a frame // pointer for the frame to show up in a stack walk. if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_FRAMED)) codeGen->setFrameRequired(true); #endif if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { // The JIT doesn't currently support loop alignment for prejitted images. // (The JIT doesn't know the final address of the code, hence // it can't align code based on unknown addresses.) codeGen->SetAlignLoops(false); // loop alignment not supported for prejitted code } else { codeGen->SetAlignLoops(JitConfig.JitAlignLoops() == 1); } } #if TARGET_ARM // A single JitStress=1 Linux ARM32 test fails when we expand virtual calls early // JIT\HardwareIntrinsics\General\Vector128_1\Vector128_1_ro // opts.compExpandCallsEarly = (JitConfig.JitExpandCallsEarly() == 2); #else opts.compExpandCallsEarly = (JitConfig.JitExpandCallsEarly() != 0); #endif fgCanRelocateEHRegions = true; } #ifdef TARGET_ARMARCH // Function compRsvdRegCheck: // given a curState to use for calculating the total frame size // it will return true if the REG_OPT_RSVD should be reserved so // that it can be used to form large offsets when accessing stack // based LclVar including both incoming and outgoing argument areas. // // The method advances the frame layout state to curState by calling // lvaFrameSize(curState). // bool Compiler::compRsvdRegCheck(FrameLayoutState curState) { // Always do the layout even if returning early. Callers might // depend on us to do the layout. unsigned frameSize = lvaFrameSize(curState); JITDUMP("\n" "compRsvdRegCheck\n" " frame size = %6d\n" " compArgSize = %6d\n", frameSize, compArgSize); if (opts.MinOpts()) { // Have a recovery path in case we fail to reserve REG_OPT_RSVD and go // over the limit of SP and FP offset ranges due to large // temps. JITDUMP(" Returning true (MinOpts)\n\n"); return true; } unsigned calleeSavedRegMaxSz = CALLEE_SAVED_REG_MAXSZ; if (compFloatingPointUsed) { calleeSavedRegMaxSz += CALLEE_SAVED_FLOAT_MAXSZ; } calleeSavedRegMaxSz += REGSIZE_BYTES; // we always push LR. 
See genPushCalleeSavedRegisters noway_assert(frameSize >= calleeSavedRegMaxSz); #if defined(TARGET_ARM64) // TODO-ARM64-CQ: update this! JITDUMP(" Returning true (ARM64)\n\n"); return true; // just always assume we'll need it, for now #else // TARGET_ARM // frame layout: // // ... high addresses ... // frame contents size // ------------------- ------------------------ // inArgs compArgSize (includes prespill) // caller SP ---> // prespill // LR REGSIZE_BYTES // R11 ---> R11 REGSIZE_BYTES // callee saved regs CALLEE_SAVED_REG_MAXSZ (32 bytes) // optional saved fp regs CALLEE_SAVED_FLOAT_MAXSZ (64 bytes) // lclSize // incl. TEMPS MAX_SPILL_TEMP_SIZE // incl. outArgs // SP ---> // ... low addresses ... // // When codeGen->isFramePointerRequired is true, R11 will be established as a frame pointer. // We can then use R11 to access incoming args with positive offsets, and LclVars with // negative offsets. // // In functions with EH, in the non-funclet (or main) region, even though we will have a // frame pointer, we can use SP with positive offsets to access any or all locals or arguments // that we can reach with SP-relative encodings. The funclet region might require the reserved // register, since it must use offsets from R11 to access the parent frame. unsigned maxR11PositiveEncodingOffset = compFloatingPointUsed ? 0x03FC : 0x0FFF; JITDUMP(" maxR11PositiveEncodingOffset = %6d\n", maxR11PositiveEncodingOffset); // Floating point load/store instructions (VLDR/VSTR) can address up to -0x3FC from R11, but we // don't know if there are either no integer locals, or if we don't need large negative offsets // for the integer locals, so we must use the integer max negative offset, which is a // smaller (absolute value) number. unsigned maxR11NegativeEncodingOffset = 0x00FF; // This is a negative offset from R11. JITDUMP(" maxR11NegativeEncodingOffset = %6d\n", maxR11NegativeEncodingOffset); // -1 because otherwise we are computing the address just beyond the last argument, which we don't need to do. unsigned maxR11PositiveOffset = compArgSize + (2 * REGSIZE_BYTES) - 1; JITDUMP(" maxR11PositiveOffset = %6d\n", maxR11PositiveOffset); // The value is positive, but represents a negative offset from R11. // frameSize includes callee-saved space for R11 and LR, which are at non-negative offsets from R11 // (+0 and +4, respectively), so don't include those in the max possible negative offset. assert(frameSize >= (2 * REGSIZE_BYTES)); unsigned maxR11NegativeOffset = frameSize - (2 * REGSIZE_BYTES); JITDUMP(" maxR11NegativeOffset = %6d\n", maxR11NegativeOffset); if (codeGen->isFramePointerRequired()) { if (maxR11NegativeOffset > maxR11NegativeEncodingOffset) { JITDUMP(" Returning true (frame required and maxR11NegativeOffset)\n\n"); return true; } if (maxR11PositiveOffset > maxR11PositiveEncodingOffset) { JITDUMP(" Returning true (frame required and maxR11PositiveOffset)\n\n"); return true; } } // Now consider the SP based frame case. Note that we will use SP based offsets to access the stack in R11 based // frames in the non-funclet main code area. unsigned maxSPPositiveEncodingOffset = compFloatingPointUsed ? 0x03FC : 0x0FFF; JITDUMP(" maxSPPositiveEncodingOffset = %6d\n", maxSPPositiveEncodingOffset); // -1 because otherwise we are computing the address just beyond the last argument, which we don't need to do. 
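    // Illustrative (hypothetical) numbers: with only integer locals the SP positive encoding limit
    // above is 0x0FFF (4095). If compArgSize were 32 and frameSize were 4096, the value computed
    // below would be 32 + 4096 - 1 = 4127, which exceeds that limit; in the no-frame-pointer case
    // below that alone is enough to require reserving REG_OPT_RSVD.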
assert(compArgSize + frameSize > 0); unsigned maxSPPositiveOffset = compArgSize + frameSize - 1; if (codeGen->isFramePointerUsed()) { // We have a frame pointer, so we can use it to access part of the stack, even if SP can't reach those parts. // We will still generate SP-relative offsets if SP can reach. // First, check that the stack between R11 and SP can be fully reached, either via negative offset from FP // or positive offset from SP. Don't count stored R11 or LR, which are reached from positive offsets from FP. unsigned maxSPLocalsCombinedOffset = frameSize - (2 * REGSIZE_BYTES) - 1; JITDUMP(" maxSPLocalsCombinedOffset = %6d\n", maxSPLocalsCombinedOffset); if (maxSPLocalsCombinedOffset > maxSPPositiveEncodingOffset) { // Can R11 help? unsigned maxRemainingLocalsCombinedOffset = maxSPLocalsCombinedOffset - maxSPPositiveEncodingOffset; JITDUMP(" maxRemainingLocalsCombinedOffset = %6d\n", maxRemainingLocalsCombinedOffset); if (maxRemainingLocalsCombinedOffset > maxR11NegativeEncodingOffset) { JITDUMP(" Returning true (frame pointer exists; R11 and SP can't reach entire stack between them)\n\n"); return true; } // Otherwise, yes, we can address the remaining parts of the locals frame with negative offsets from R11. } // Check whether either R11 or SP can access the arguments. if ((maxR11PositiveOffset > maxR11PositiveEncodingOffset) && (maxSPPositiveOffset > maxSPPositiveEncodingOffset)) { JITDUMP(" Returning true (frame pointer exists; R11 and SP can't reach all arguments)\n\n"); return true; } } else { if (maxSPPositiveOffset > maxSPPositiveEncodingOffset) { JITDUMP(" Returning true (no frame pointer exists; SP can't reach all of frame)\n\n"); return true; } } // We won't need to reserve REG_OPT_RSVD. // JITDUMP(" Returning false\n\n"); return false; #endif // TARGET_ARM } #endif // TARGET_ARMARCH //------------------------------------------------------------------------ // compGetTieringName: get a string describing tiered compilation settings // for this method // // Arguments: // wantShortName - true if a short name is ok (say for using in file names) // // Returns: // String describing tiering decisions for this method, including cases // where the jit codegen will differ from what the runtime requested. // const char* Compiler::compGetTieringName(bool wantShortName) const { const bool tier0 = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0); const bool tier1 = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER1); if (!opts.compMinOptsIsSet) { // If 'compMinOptsIsSet' is not set, just return here. Otherwise, if this method is called // by the assertAbort(), we would recursively call assert while trying to get MinOpts() // and eventually stackoverflow. return "Optimization-Level-Not-Yet-Set"; } assert(!tier0 || !tier1); // We don't expect multiple TIER flags to be set at one time. if (tier0) { return "Tier0"; } else if (tier1) { if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_OSR)) { return "Tier1-OSR"; } else { return "Tier1"; } } else if (opts.OptimizationEnabled()) { if (compSwitchedToOptimized) { return wantShortName ? "Tier0-FullOpts" : "Tier-0 switched to FullOpts"; } else { return "FullOpts"; } } else if (opts.MinOpts()) { if (compSwitchedToMinOpts) { if (compSwitchedToOptimized) { return wantShortName ? "Tier0-FullOpts-MinOpts" : "Tier-0 switched to FullOpts, then to MinOpts"; } else { return wantShortName ? "Tier0-MinOpts" : "Tier-0 switched MinOpts"; } } else { return "MinOpts"; } } else if (opts.compDbgCode) { return "Debug"; } else { return wantShortName ? 
"Unknown" : "Unknown optimization level"; } } //------------------------------------------------------------------------ // compGetStressMessage: get a string describing jitstress capability // for this method // // Returns: // An empty string if stress is not enabled, else a string describing // if this method is subject to stress or is excluded by name or hash. // const char* Compiler::compGetStressMessage() const { // Add note about stress where appropriate const char* stressMessage = ""; #ifdef DEBUG // Is stress enabled via mode name or level? if ((JitConfig.JitStressModeNames() != nullptr) || (getJitStressLevel() > 0)) { // Is the method being jitted excluded from stress via range? if (bRangeAllowStress) { // Or is it excluded via name? if (!JitConfig.JitStressOnly().isEmpty() || !JitConfig.JitStressOnly().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { // Not excluded -- stress can happen stressMessage = " JitStress"; } else { stressMessage = " NoJitStress(Only)"; } } else { stressMessage = " NoJitStress(Range)"; } } #endif // DEBUG return stressMessage; } void Compiler::compFunctionTraceStart() { #ifdef DEBUG if (compIsForInlining()) { return; } if ((JitConfig.JitFunctionTrace() != 0) && !opts.disDiffable) { LONG newJitNestingLevel = InterlockedIncrement(&Compiler::jitNestingLevel); if (newJitNestingLevel <= 0) { printf("{ Illegal nesting level %d }\n", newJitNestingLevel); } for (LONG i = 0; i < newJitNestingLevel - 1; i++) { printf(" "); } printf("{ Start Jitting Method %4d %s (MethodHash=%08x) %s\n", Compiler::jitTotalMethodCompiled, info.compFullName, info.compMethodHash(), compGetTieringName()); /* } editor brace matching workaround for this printf */ } #endif // DEBUG } void Compiler::compFunctionTraceEnd(void* methodCodePtr, ULONG methodCodeSize, bool isNYI) { #ifdef DEBUG assert(!compIsForInlining()); if ((JitConfig.JitFunctionTrace() != 0) && !opts.disDiffable) { LONG newJitNestingLevel = InterlockedDecrement(&Compiler::jitNestingLevel); if (newJitNestingLevel < 0) { printf("{ Illegal nesting level %d }\n", newJitNestingLevel); } for (LONG i = 0; i < newJitNestingLevel; i++) { printf(" "); } // Note: that is incorrect if we are compiling several methods at the same time. unsigned methodNumber = Compiler::jitTotalMethodCompiled - 1; /* { editor brace-matching workaround for following printf */ printf("} Jitted Method %4d at" FMT_ADDR "method %s size %08x%s%s\n", methodNumber, DBG_ADDR(methodCodePtr), info.compFullName, methodCodeSize, isNYI ? " NYI" : (compIsForImportOnly() ? " import only" : ""), opts.altJit ? 
" altjit" : ""); } #endif // DEBUG } //------------------------------------------------------------------------ // BeginPhase: begin execution of a phase // // Arguments: // phase - the phase that is about to begin // void Compiler::BeginPhase(Phases phase) { mostRecentlyActivePhase = phase; } //------------------------------------------------------------------------ // EndPhase: finish execution of a phase // // Arguments: // phase - the phase that has just finished // void Compiler::EndPhase(Phases phase) { #if defined(FEATURE_JIT_METHOD_PERF) if (pCompJitTimer != nullptr) { pCompJitTimer->EndPhase(this, phase); } #endif mostRecentlyActivePhase = phase; } //------------------------------------------------------------------------ // compCompile: run phases needed for compilation // // Arguments: // methodCodePtr [OUT] - address of generated code // methodCodeSize [OUT] - size of the generated code (hot + cold setions) // compileFlags [IN] - flags controlling jit behavior // // Notes: // This is the most interesting 'toplevel' function in the JIT. It goes through the operations of // importing, morphing, optimizations and code generation. This is called from the EE through the // code:CILJit::compileMethod function. // // For an overview of the structure of the JIT, see: // https://github.com/dotnet/runtime/blob/main/docs/design/coreclr/jit/ryujit-overview.md // // Also called for inlinees, though they will only be run through the first few phases. // void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags) { // Prepare for importation // auto preImportPhase = [this]() { if (compIsForInlining()) { // Notify root instance that an inline attempt is about to import IL impInlineRoot()->m_inlineStrategy->NoteImport(); } hashBv::Init(this); VarSetOps::AssignAllowUninitRhs(this, compCurLife, VarSetOps::UninitVal()); // The temp holding the secret stub argument is used by fgImport() when importing the intrinsic. if (info.compPublishStubParam) { assert(lvaStubArgumentVar == BAD_VAR_NUM); lvaStubArgumentVar = lvaGrabTempWithImplicitUse(false DEBUGARG("stub argument")); lvaGetDesc(lvaStubArgumentVar)->lvType = TYP_I_IMPL; // TODO-CQ: there is no need to mark it as doNotEnreg. There are no stores for this local // before codegen so liveness and LSRA mark it as "liveIn" and always allocate a stack slot for it. // However, it would be better to process it like other argument locals and keep it in // a reg for the whole method without spilling to the stack when possible. lvaSetVarDoNotEnregister(lvaStubArgumentVar DEBUGARG(DoNotEnregisterReason::VMNeedsStackAddr)); } }; DoPhase(this, PHASE_PRE_IMPORT, preImportPhase); compFunctionTraceStart(); // Incorporate profile data. // // Note: the importer is sensitive to block weights, so this has // to happen before importation. // DoPhase(this, PHASE_INCPROFILE, &Compiler::fgIncorporateProfileData); // If we're going to instrument code, we may need to prepare before // we import. // if (compileFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)) { DoPhase(this, PHASE_IBCPREP, &Compiler::fgPrepareToInstrumentMethod); } // Import: convert the instrs in each basic block to a tree based intermediate representation // DoPhase(this, PHASE_IMPORTATION, &Compiler::fgImport); // Expand any patchpoints // DoPhase(this, PHASE_PATCHPOINTS, &Compiler::fgTransformPatchpoints); // If instrumenting, add block and class probes. 
// if (compileFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)) { DoPhase(this, PHASE_IBCINSTR, &Compiler::fgInstrumentMethod); } // Transform indirect calls that require control flow expansion. // DoPhase(this, PHASE_INDXCALL, &Compiler::fgTransformIndirectCalls); // PostImportPhase: cleanup inlinees // auto postImportPhase = [this]() { // If this is a viable inline candidate if (compIsForInlining() && !compDonotInline()) { // Filter out unimported BBs in the inlinee // fgPostImportationCleanup(); // Update type of return spill temp if we have gathered // better info when importing the inlinee, and the return // spill temp is single def. if (fgNeedReturnSpillTemp()) { CORINFO_CLASS_HANDLE retExprClassHnd = impInlineInfo->retExprClassHnd; if (retExprClassHnd != nullptr) { LclVarDsc* returnSpillVarDsc = lvaGetDesc(lvaInlineeReturnSpillTemp); if (returnSpillVarDsc->lvSingleDef) { lvaUpdateClass(lvaInlineeReturnSpillTemp, retExprClassHnd, impInlineInfo->retExprClassHndIsExact); } } } } }; DoPhase(this, PHASE_POST_IMPORT, postImportPhase); // If we're importing for inlining, we're done. if (compIsForInlining()) { #ifdef FEATURE_JIT_METHOD_PERF if (pCompJitTimer != nullptr) { #if MEASURE_CLRAPI_CALLS EndPhase(PHASE_CLR_API); #endif pCompJitTimer->Terminate(this, CompTimeSummaryInfo::s_compTimeSummary, false); } #endif return; } // At this point in the phase list, all the inlinee phases have // been run, and inlinee compiles have exited, so we should only // get this far if we are jitting the root method. noway_assert(!compIsForInlining()); // Maybe the caller was not interested in generating code if (compIsForImportOnly()) { compFunctionTraceEnd(nullptr, 0, false); return; } #if !FEATURE_EH // If we aren't yet supporting EH in a compiler bring-up, remove as many EH handlers as possible, so // we can pass tests that contain try/catch EH, but don't actually throw any exceptions. fgRemoveEH(); #endif // !FEATURE_EH // We could allow ESP frames. Just need to reserve space for // pushing EBP if the method becomes an EBP-frame after an edit. // Note that requiring a EBP Frame disallows double alignment. Thus if we change this // we either have to disallow double alignment for E&C some other way or handle it in EETwain. if (opts.compDbgEnC) { codeGen->setFramePointerRequired(true); // We don't care about localloc right now. If we do support it, // EECodeManager::FixContextForEnC() needs to handle it smartly // in case the localloc was actually executed. // // compLocallocUsed = true; } // Start phases that are broadly called morphing, and includes // global morph, as well as other phases that massage the trees so // that we can generate code out of them. // auto morphInitPhase = [this]() { // Initialize the BlockSet epoch NewBasicBlockEpoch(); fgOutgoingArgTemps = nullptr; // Insert call to class constructor as the first basic block if // we were asked to do so. 
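        // (The VM requests this by returning CORINFO_INITCLASS_USE_HELPER from initClass(); the
        // helper call produced by fgInitThisClass() is then prepended to a scratch first block.)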
if (info.compCompHnd->initClass(nullptr /* field */, nullptr /* method */, impTokenLookupContextHandle /* context */) & CORINFO_INITCLASS_USE_HELPER) { fgEnsureFirstBBisScratch(); fgNewStmtAtBeg(fgFirstBB, fgInitThisClass()); } #ifdef DEBUG if (opts.compGcChecks) { for (unsigned i = 0; i < info.compArgsCount; i++) { if (lvaGetDesc(i)->TypeGet() == TYP_REF) { // confirm that the argument is a GC pointer (for debugging (GC stress)) GenTree* op = gtNewLclvNode(i, TYP_REF); GenTreeCall::Use* args = gtNewCallArgs(op); op = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_VOID, args); fgEnsureFirstBBisScratch(); fgNewStmtAtEnd(fgFirstBB, op); if (verbose) { printf("\ncompGcChecks tree:\n"); gtDispTree(op); } } } } #endif // DEBUG #if defined(DEBUG) && defined(TARGET_XARCH) if (opts.compStackCheckOnRet) { lvaReturnSpCheck = lvaGrabTempWithImplicitUse(false DEBUGARG("ReturnSpCheck")); lvaSetVarDoNotEnregister(lvaReturnSpCheck, DoNotEnregisterReason::ReturnSpCheck); lvaGetDesc(lvaReturnSpCheck)->lvType = TYP_I_IMPL; } #endif // defined(DEBUG) && defined(TARGET_XARCH) #if defined(DEBUG) && defined(TARGET_X86) if (opts.compStackCheckOnCall) { lvaCallSpCheck = lvaGrabTempWithImplicitUse(false DEBUGARG("CallSpCheck")); lvaGetDesc(lvaCallSpCheck)->lvType = TYP_I_IMPL; } #endif // defined(DEBUG) && defined(TARGET_X86) // Update flow graph after importation. // Removes un-imported blocks, trims EH, and ensures correct OSR entry flow. // fgPostImportationCleanup(); }; DoPhase(this, PHASE_MORPH_INIT, morphInitPhase); #ifdef DEBUG // Inliner could add basic blocks. Check that the flowgraph data is up-to-date fgDebugCheckBBlist(false, false); #endif // DEBUG // Inline callee methods into this root method // DoPhase(this, PHASE_MORPH_INLINE, &Compiler::fgInline); // Record "start" values for post-inlining cycles and elapsed time. RecordStateAtEndOfInlining(); // Transform each GT_ALLOCOBJ node into either an allocation helper call or // local variable allocation on the stack. 
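    // Note: stack allocation is only turned on below when both compObjectStackAllocation() and
    // opts.OptimizationEnabled() hold; otherwise every GT_ALLOCOBJ becomes an allocation helper call.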
ObjectAllocator objectAllocator(this); // PHASE_ALLOCATE_OBJECTS if (compObjectStackAllocation() && opts.OptimizationEnabled()) { objectAllocator.EnableObjectStackAllocation(); } objectAllocator.Run(); // Add any internal blocks/trees we may need // DoPhase(this, PHASE_MORPH_ADD_INTERNAL, &Compiler::fgAddInternal); // Remove empty try regions // DoPhase(this, PHASE_EMPTY_TRY, &Compiler::fgRemoveEmptyTry); // Remove empty finally regions // DoPhase(this, PHASE_EMPTY_FINALLY, &Compiler::fgRemoveEmptyFinally); // Streamline chains of finally invocations // DoPhase(this, PHASE_MERGE_FINALLY_CHAINS, &Compiler::fgMergeFinallyChains); // Clone code in finallys to reduce overhead for non-exceptional paths // DoPhase(this, PHASE_CLONE_FINALLY, &Compiler::fgCloneFinally); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Update finally target flags after EH optimizations // DoPhase(this, PHASE_UPDATE_FINALLY_FLAGS, &Compiler::fgUpdateFinallyTargetFlags); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #if DEBUG if (lvaEnregEHVars) { unsigned methHash = info.compMethodHash(); char* lostr = getenv("JitEHWTHashLo"); unsigned methHashLo = 0; bool dump = false; if (lostr != nullptr) { sscanf_s(lostr, "%x", &methHashLo); dump = true; } char* histr = getenv("JitEHWTHashHi"); unsigned methHashHi = UINT32_MAX; if (histr != nullptr) { sscanf_s(histr, "%x", &methHashHi); dump = true; } if (methHash < methHashLo || methHash > methHashHi) { lvaEnregEHVars = false; } else if (dump) { printf("Enregistering EH Vars for method %s, hash = 0x%x.\n", info.compFullName, info.compMethodHash()); printf(""); // flush } } if (lvaEnregMultiRegVars) { unsigned methHash = info.compMethodHash(); char* lostr = getenv("JitMultiRegHashLo"); unsigned methHashLo = 0; bool dump = false; if (lostr != nullptr) { sscanf_s(lostr, "%x", &methHashLo); dump = true; } char* histr = getenv("JitMultiRegHashHi"); unsigned methHashHi = UINT32_MAX; if (histr != nullptr) { sscanf_s(histr, "%x", &methHashHi); dump = true; } if (methHash < methHashLo || methHash > methHashHi) { lvaEnregMultiRegVars = false; } else if (dump) { printf("Enregistering MultiReg Vars for method %s, hash = 0x%x.\n", info.compFullName, info.compMethodHash()); printf(""); // flush } } #endif // Compute bbNum, bbRefs and bbPreds // // This is the first time full (not cheap) preds will be computed. // And, if we have profile data, we can now check integrity. // // From this point on the flowgraph information such as bbNum, // bbRefs or bbPreds has to be kept updated. // auto computePredsPhase = [this]() { JITDUMP("\nRenumbering the basic blocks for fgComputePred\n"); fgRenumberBlocks(); noway_assert(!fgComputePredsDone); fgComputePreds(); }; DoPhase(this, PHASE_COMPUTE_PREDS, computePredsPhase); // Now that we have pred lists, do some flow-related optimizations // if (opts.OptimizationEnabled()) { // Merge common throw blocks // DoPhase(this, PHASE_MERGE_THROWS, &Compiler::fgTailMergeThrows); // Run an early flow graph simplification pass // auto earlyUpdateFlowGraphPhase = [this]() { constexpr bool doTailDup = false; fgUpdateFlowGraph(doTailDup); }; DoPhase(this, PHASE_EARLY_UPDATE_FLOW_GRAPH, earlyUpdateFlowGraphPhase); } // Promote struct locals // auto promoteStructsPhase = [this]() { // For x64 and ARM64 we need to mark irregular parameters lvaRefCountState = RCS_EARLY; fgResetImplicitByRefRefCount(); fgPromoteStructs(); }; DoPhase(this, PHASE_PROMOTE_STRUCTS, promoteStructsPhase); // Figure out what locals are address-taken. 
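    // (Address-exposed locals cannot be enregistered; this analysis also feeds the implicit-byref
    // retyping decisions made in PHASE_MORPH_IMPBYREF below.)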
// DoPhase(this, PHASE_STR_ADRLCL, &Compiler::fgMarkAddressExposedLocals); // Run a simple forward substitution pass. // DoPhase(this, PHASE_FWD_SUB, &Compiler::fgForwardSub); // Apply the type update to implicit byref parameters; also choose (based on address-exposed // analysis) which implicit byref promotions to keep (requires copy to initialize) or discard. // DoPhase(this, PHASE_MORPH_IMPBYREF, &Compiler::fgRetypeImplicitByRefArgs); #ifdef DEBUG // Now that locals have address-taken and implicit byref marked, we can safely apply stress. lvaStressLclFld(); fgStress64RsltMul(); #endif // DEBUG // Morph the trees in all the blocks of the method // auto morphGlobalPhase = [this]() { unsigned prevBBCount = fgBBcount; fgMorphBlocks(); // Fix any LclVar annotations on discarded struct promotion temps for implicit by-ref args fgMarkDemotedImplicitByRefArgs(); lvaRefCountState = RCS_INVALID; #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) if (fgNeedToAddFinallyTargetBits) { // We previously wiped out the BBF_FINALLY_TARGET bits due to some morphing; add them back. fgAddFinallyTargetFlags(); fgNeedToAddFinallyTargetBits = false; } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Decide the kind of code we want to generate fgSetOptions(); fgExpandQmarkNodes(); #ifdef DEBUG compCurBB = nullptr; #endif // DEBUG // If we needed to create any new BasicBlocks then renumber the blocks if (fgBBcount > prevBBCount) { fgRenumberBlocks(); } // We can now enable all phase checking activePhaseChecks = PhaseChecks::CHECK_ALL; }; DoPhase(this, PHASE_MORPH_GLOBAL, morphGlobalPhase); // GS security checks for unsafe buffers // auto gsPhase = [this]() { unsigned prevBBCount = fgBBcount; if (getNeedsGSSecurityCookie()) { gsGSChecksInitCookie(); if (compGSReorderStackLayout) { gsCopyShadowParams(); } // If we needed to create any new BasicBlocks then renumber the blocks if (fgBBcount > prevBBCount) { fgRenumberBlocks(); } } else { JITDUMP("No GS security needed\n"); } }; DoPhase(this, PHASE_GS_COOKIE, gsPhase); // Compute the block and edge weights // DoPhase(this, PHASE_COMPUTE_EDGE_WEIGHTS, &Compiler::fgComputeBlockAndEdgeWeights); #if defined(FEATURE_EH_FUNCLETS) // Create funclets from the EH handlers. // DoPhase(this, PHASE_CREATE_FUNCLETS, &Compiler::fgCreateFunclets); #endif // FEATURE_EH_FUNCLETS if (opts.OptimizationEnabled()) { // Invert loops // DoPhase(this, PHASE_INVERT_LOOPS, &Compiler::optInvertLoops); // Optimize block order // DoPhase(this, PHASE_OPTIMIZE_LAYOUT, &Compiler::optOptimizeLayout); // Compute reachability sets and dominators. // DoPhase(this, PHASE_COMPUTE_REACHABILITY, &Compiler::fgComputeReachability); // Scale block weights and mark run rarely blocks. // DoPhase(this, PHASE_SET_BLOCK_WEIGHTS, &Compiler::optSetBlockWeights); // Discover and classify natural loops (e.g. mark iterative loops as such). Also marks loop blocks // and sets bbWeight to the loop nesting levels. // DoPhase(this, PHASE_FIND_LOOPS, &Compiler::optFindLoopsPhase); // Clone loops with optimization opportunities, and choose one based on dynamic condition evaluation. // DoPhase(this, PHASE_CLONE_LOOPS, &Compiler::optCloneLoops); // Unroll loops // DoPhase(this, PHASE_UNROLL_LOOPS, &Compiler::optUnrollLoops); // Clear loop table info that is not used after this point, and might become invalid. 
// DoPhase(this, PHASE_CLEAR_LOOP_INFO, &Compiler::optClearLoopIterInfo); } #ifdef DEBUG fgDebugCheckLinks(); #endif // Create the variable table (and compute variable ref counts) // DoPhase(this, PHASE_MARK_LOCAL_VARS, &Compiler::lvaMarkLocalVars); // IMPORTANT, after this point, locals are ref counted. // However, ref counts are not kept incrementally up to date. assert(lvaLocalVarRefCounted()); if (opts.OptimizationEnabled()) { // Optimize boolean conditions // DoPhase(this, PHASE_OPTIMIZE_BOOLS, &Compiler::optOptimizeBools); // optOptimizeBools() might have changed the number of blocks; the dominators/reachability might be bad. } // Figure out the order in which operators are to be evaluated // DoPhase(this, PHASE_FIND_OPER_ORDER, &Compiler::fgFindOperOrder); // Weave the tree lists. Anyone who modifies the tree shapes after // this point is responsible for calling fgSetStmtSeq() to keep the // nodes properly linked. // This can create GC poll calls, and create new BasicBlocks (without updating dominators/reachability). // DoPhase(this, PHASE_SET_BLOCK_ORDER, &Compiler::fgSetBlockOrder); // At this point we know if we are fully interruptible or not if (opts.OptimizationEnabled()) { bool doSsa = true; bool doEarlyProp = true; bool doValueNum = true; bool doLoopHoisting = true; bool doCopyProp = true; bool doBranchOpt = true; bool doAssertionProp = true; bool doRangeAnalysis = true; int iterations = 1; #if defined(OPT_CONFIG) doSsa = (JitConfig.JitDoSsa() != 0); doEarlyProp = doSsa && (JitConfig.JitDoEarlyProp() != 0); doValueNum = doSsa && (JitConfig.JitDoValueNumber() != 0); doLoopHoisting = doValueNum && (JitConfig.JitDoLoopHoisting() != 0); doCopyProp = doValueNum && (JitConfig.JitDoCopyProp() != 0); doBranchOpt = doValueNum && (JitConfig.JitDoRedundantBranchOpts() != 0); doAssertionProp = doValueNum && (JitConfig.JitDoAssertionProp() != 0); doRangeAnalysis = doAssertionProp && (JitConfig.JitDoRangeAnalysis() != 0); if (opts.optRepeat) { iterations = JitConfig.JitOptRepeatCount(); } #endif // defined(OPT_CONFIG) while (iterations > 0) { if (doSsa) { // Build up SSA form for the IR // DoPhase(this, PHASE_BUILD_SSA, &Compiler::fgSsaBuild); } if (doEarlyProp) { // Propagate array length and rewrite getType() method call // DoPhase(this, PHASE_EARLY_PROP, &Compiler::optEarlyProp); } if (doValueNum) { // Value number the trees // DoPhase(this, PHASE_VALUE_NUMBER, &Compiler::fgValueNumber); } if (doLoopHoisting) { // Hoist invariant code out of loops // DoPhase(this, PHASE_HOIST_LOOP_CODE, &Compiler::optHoistLoopCode); } if (doCopyProp) { // Perform VN based copy propagation // DoPhase(this, PHASE_VN_COPY_PROP, &Compiler::optVnCopyProp); } if (doBranchOpt) { DoPhase(this, PHASE_OPTIMIZE_BRANCHES, &Compiler::optRedundantBranches); } // Remove common sub-expressions // DoPhase(this, PHASE_OPTIMIZE_VALNUM_CSES, &Compiler::optOptimizeCSEs); if (doAssertionProp) { // Assertion propagation // DoPhase(this, PHASE_ASSERTION_PROP_MAIN, &Compiler::optAssertionPropMain); } if (doRangeAnalysis) { auto rangePhase = [this]() { RangeCheck rc(this); rc.OptimizeRangeChecks(); }; // Bounds check elimination via range analysis // DoPhase(this, PHASE_OPTIMIZE_INDEX_CHECKS, rangePhase); } if (fgModified) { // update the flowgraph if we modified it during the optimization phase // auto optUpdateFlowGraphPhase = [this]() { constexpr bool doTailDup = false; fgUpdateFlowGraph(doTailDup); }; DoPhase(this, PHASE_OPT_UPDATE_FLOW_GRAPH, optUpdateFlowGraphPhase); // Recompute the edge weight if we have modified the 
flow graph // DoPhase(this, PHASE_COMPUTE_EDGE_WEIGHTS2, &Compiler::fgComputeEdgeWeights); } // Iterate if requested, resetting annotations first. if (--iterations == 0) { break; } ResetOptAnnotations(); RecomputeLoopInfo(); } } // Insert GC Polls DoPhase(this, PHASE_INSERT_GC_POLLS, &Compiler::fgInsertGCPolls); // Determine start of cold region if we are hot/cold splitting // DoPhase(this, PHASE_DETERMINE_FIRST_COLD_BLOCK, &Compiler::fgDetermineFirstColdBlock); #ifdef DEBUG fgDebugCheckLinks(compStressCompile(STRESS_REMORPH_TREES, 50)); // Stash the current estimate of the function's size if necessary. if (verbose) { compSizeEstimate = 0; compCycleEstimate = 0; for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->Statements()) { compSizeEstimate += stmt->GetCostSz(); compCycleEstimate += stmt->GetCostEx(); } } } #endif // rationalize trees Rationalizer rat(this); // PHASE_RATIONALIZE rat.Run(); // Here we do "simple lowering". When the RyuJIT backend works for all // platforms, this will be part of the more general lowering phase. For now, though, we do a separate // pass of "final lowering." We must do this before (final) liveness analysis, because this creates // range check throw blocks, in which the liveness must be correct. // DoPhase(this, PHASE_SIMPLE_LOWERING, &Compiler::fgSimpleLowering); // Enable this to gather statistical data such as // call and register argument info, flowgraph and loop info, etc. compJitStats(); #ifdef TARGET_ARM if (compLocallocUsed) { // We reserve REG_SAVED_LOCALLOC_SP to store SP on entry for stack unwinding codeGen->regSet.rsMaskResvd |= RBM_SAVED_LOCALLOC_SP; } #endif // TARGET_ARM // Assign registers to variables, etc. /////////////////////////////////////////////////////////////////////////////// // Dominator and reachability sets are no longer valid. They haven't been // maintained up to here, and shouldn't be used (unless recomputed). /////////////////////////////////////////////////////////////////////////////// fgDomsComputed = false; // Create LinearScan before Lowering, so that Lowering can call LinearScan methods // for determining whether locals are register candidates and (for xarch) whether // a node is a containable memory op. m_pLinearScan = getLinearScanAllocator(this); // Lower // m_pLowering = new (this, CMK_LSRA) Lowering(this, m_pLinearScan); // PHASE_LOWERING m_pLowering->Run(); if (!compMacOsArm64Abi()) { // Set stack levels; this information is necessary for x86 // but on other platforms it is used only in asserts. // TODO: do not run it in release on other platforms, see https://github.com/dotnet/runtime/issues/42673. StackLevelSetter stackLevelSetter(this); stackLevelSetter.Run(); } // We can not add any new tracked variables after this point. 
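    // (The linear scan register allocator below works over this now-frozen set of tracked variables.)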
lvaTrackedFixed = true; // Now that lowering is completed we can proceed to perform register allocation // auto linearScanPhase = [this]() { m_pLinearScan->doLinearScan(); }; DoPhase(this, PHASE_LINEAR_SCAN, linearScanPhase); // Copied from rpPredictRegUse() SetFullPtrRegMapRequired(codeGen->GetInterruptible() || !codeGen->isFramePointerUsed()); #if FEATURE_LOOP_ALIGN // Place loop alignment instructions DoPhase(this, PHASE_ALIGN_LOOPS, &Compiler::placeLoopAlignInstructions); #endif // Generate code codeGen->genGenerateCode(methodCodePtr, methodCodeSize); #if TRACK_LSRA_STATS if (JitConfig.DisplayLsraStats() == 2) { m_pLinearScan->dumpLsraStatsCsv(jitstdout); } #endif // TRACK_LSRA_STATS // We're done -- set the active phase to the last phase // (which isn't really a phase) mostRecentlyActivePhase = PHASE_POST_EMIT; #ifdef FEATURE_JIT_METHOD_PERF if (pCompJitTimer) { #if MEASURE_CLRAPI_CALLS EndPhase(PHASE_CLR_API); #else EndPhase(PHASE_POST_EMIT); #endif pCompJitTimer->Terminate(this, CompTimeSummaryInfo::s_compTimeSummary, true); } #endif // Generate PatchpointInfo generatePatchpointInfo(); RecordStateAtEndOfCompilation(); #ifdef FEATURE_TRACELOGGING compJitTelemetry.NotifyEndOfCompilation(); #endif #if defined(DEBUG) ++Compiler::jitTotalMethodCompiled; #endif // defined(DEBUG) compFunctionTraceEnd(*methodCodePtr, *methodCodeSize, false); JITDUMP("Method code size: %d\n", (unsigned)(*methodCodeSize)); #if FUNC_INFO_LOGGING if (compJitFuncInfoFile != nullptr) { assert(!compIsForInlining()); #ifdef DEBUG // We only have access to info.compFullName in DEBUG builds. fprintf(compJitFuncInfoFile, "%s\n", info.compFullName); #elif FEATURE_SIMD fprintf(compJitFuncInfoFile, " %s\n", eeGetMethodFullName(info.compMethodHnd)); #endif fprintf(compJitFuncInfoFile, ""); // in our logic this causes a flush } #endif // FUNC_INFO_LOGGING } #if FEATURE_LOOP_ALIGN //------------------------------------------------------------------------ // placeLoopAlignInstructions: Iterate over all the blocks and determine // the best position to place the 'align' instruction. Inserting 'align' // instructions after an unconditional branch is preferred over inserting // in the block before the loop. In case there are multiple blocks // having 'jmp', the one that has lower weight is preferred. // If the block having 'jmp' is hotter than the block before the loop, // the align will still be placed after 'jmp' because the processor should // be smart enough to not fetch extra instruction beyond jmp. // void Compiler::placeLoopAlignInstructions() { if (loopAlignCandidates == 0) { return; } int loopsToProcess = loopAlignCandidates; JITDUMP("Inside placeLoopAlignInstructions for %d loops.\n", loopAlignCandidates); // Add align only if there were any loops that needed alignment weight_t minBlockSoFar = BB_MAX_WEIGHT; BasicBlock* bbHavingAlign = nullptr; BasicBlock::loopNumber currentAlignedLoopNum = BasicBlock::NOT_IN_LOOP; if ((fgFirstBB != nullptr) && fgFirstBB->isLoopAlign()) { // Adding align instruction in prolog is not supported // hence just remove that loop from our list. loopsToProcess--; } for (BasicBlock* const block : Blocks()) { if (currentAlignedLoopNum != BasicBlock::NOT_IN_LOOP) { // We've been processing blocks within an aligned loop. Are we out of that loop now? 
if (currentAlignedLoopNum != block->bbNatLoopNum) { currentAlignedLoopNum = BasicBlock::NOT_IN_LOOP; } } // If there is a unconditional jump (which is not part of callf/always pair) if (opts.compJitHideAlignBehindJmp && (block->bbJumpKind == BBJ_ALWAYS) && !block->isBBCallAlwaysPairTail()) { // Track the lower weight blocks if (block->bbWeight < minBlockSoFar) { if (currentAlignedLoopNum == BasicBlock::NOT_IN_LOOP) { // Ok to insert align instruction in this block because it is not part of any aligned loop. minBlockSoFar = block->bbWeight; bbHavingAlign = block; JITDUMP(FMT_BB ", bbWeight=" FMT_WT " ends with unconditional 'jmp' \n", block->bbNum, block->bbWeight); } } } if ((block->bbNext != nullptr) && (block->bbNext->isLoopAlign())) { // If jmp was not found, then block before the loop start is where align instruction will be added. if (bbHavingAlign == nullptr) { bbHavingAlign = block; JITDUMP("Marking " FMT_BB " before the loop with BBF_HAS_ALIGN for loop at " FMT_BB "\n", block->bbNum, block->bbNext->bbNum); } else { JITDUMP("Marking " FMT_BB " that ends with unconditional jump with BBF_HAS_ALIGN for loop at " FMT_BB "\n", bbHavingAlign->bbNum, block->bbNext->bbNum); } bbHavingAlign->bbFlags |= BBF_HAS_ALIGN; minBlockSoFar = BB_MAX_WEIGHT; bbHavingAlign = nullptr; currentAlignedLoopNum = block->bbNext->bbNatLoopNum; if (--loopsToProcess == 0) { break; } } } assert(loopsToProcess == 0); } #endif //------------------------------------------------------------------------ // generatePatchpointInfo: allocate and fill in patchpoint info data, // and report it to the VM // void Compiler::generatePatchpointInfo() { if (!doesMethodHavePatchpoints() && !doesMethodHavePartialCompilationPatchpoints()) { // Nothing to report return; } // Patchpoints are only found in Tier0 code, which is unoptimized, and so // should always have frame pointer. assert(codeGen->isFramePointerUsed()); // Allocate patchpoint info storage from runtime, and fill in initial bits of data. const unsigned patchpointInfoSize = PatchpointInfo::ComputeSize(info.compLocalsCount); PatchpointInfo* const patchpointInfo = (PatchpointInfo*)info.compCompHnd->allocateArray(patchpointInfoSize); // Patchpoint offsets always refer to "virtual frame offsets". // // For x64 this falls out because Tier0 frames are always FP frames, and so the FP-relative // offset is what we want. // // For arm64, if the frame pointer is not at the top of the frame, we need to adjust the // offset. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_AMD64) // We add +TARGET_POINTER_SIZE here is to account for the slot that Jit_Patchpoint // creates when it simulates calling the OSR method (the "pseudo return address" slot). // This is effectively a new slot at the bottom of the Tier0 frame. // const int totalFrameSize = codeGen->genTotalFrameSize() + TARGET_POINTER_SIZE; const int offsetAdjust = 0; #elif defined(TARGET_ARM64) // SP is not manipulated by calls so no frame size adjustment needed. // Local Offsets may need adjusting, if FP is at bottom of frame. // const int totalFrameSize = codeGen->genTotalFrameSize(); const int offsetAdjust = codeGen->genSPtoFPdelta() - totalFrameSize; #else NYI("patchpoint info generation"); const int offsetAdjust = 0; const int totalFrameSize = 0; #endif patchpointInfo->Initialize(info.compLocalsCount, totalFrameSize); JITDUMP("--OSR--- Total Frame Size %d, local offset adjust is %d\n", patchpointInfo->TotalFrameSize(), offsetAdjust); // We record offsets for all the "locals" here. 
Could restrict // this to just the IL locals with some extra logic, and save a bit of space, // but would need to adjust all consumers, too. for (unsigned lclNum = 0; lclNum < info.compLocalsCount; lclNum++) { LclVarDsc* const varDsc = lvaGetDesc(lclNum); // We expect all these to have stack homes, and be FP relative assert(varDsc->lvOnFrame); assert(varDsc->lvFramePointerBased); // Record FramePtr relative offset (no localloc yet) patchpointInfo->SetOffset(lclNum, varDsc->GetStackOffset() + offsetAdjust); // Note if IL stream contained an address-of that potentially leads to exposure. // This bit of IL may be skipped by OSR partial importation. if (varDsc->lvHasLdAddrOp) { patchpointInfo->SetIsExposed(lclNum); } JITDUMP("--OSR-- V%02u is at virtual offset %d%s\n", lclNum, patchpointInfo->Offset(lclNum), patchpointInfo->IsExposed(lclNum) ? " (exposed)" : ""); } // Special offsets // if (lvaReportParamTypeArg()) { const int offset = lvaCachedGenericContextArgOffset(); patchpointInfo->SetGenericContextArgOffset(offset + offsetAdjust); JITDUMP("--OSR-- cached generic context virtual offset is %d\n", patchpointInfo->GenericContextArgOffset()); } if (lvaKeepAliveAndReportThis()) { const int offset = lvaCachedGenericContextArgOffset(); patchpointInfo->SetKeptAliveThisOffset(offset + offsetAdjust); JITDUMP("--OSR-- kept-alive this virtual offset is %d\n", patchpointInfo->KeptAliveThisOffset()); } if (compGSReorderStackLayout) { assert(lvaGSSecurityCookie != BAD_VAR_NUM); LclVarDsc* const varDsc = lvaGetDesc(lvaGSSecurityCookie); patchpointInfo->SetSecurityCookieOffset(varDsc->GetStackOffset() + offsetAdjust); JITDUMP("--OSR-- security cookie V%02u virtual offset is %d\n", lvaGSSecurityCookie, patchpointInfo->SecurityCookieOffset()); } if (lvaMonAcquired != BAD_VAR_NUM) { LclVarDsc* const varDsc = lvaGetDesc(lvaMonAcquired); patchpointInfo->SetMonitorAcquiredOffset(varDsc->GetStackOffset() + offsetAdjust); JITDUMP("--OSR-- monitor acquired V%02u virtual offset is %d\n", lvaMonAcquired, patchpointInfo->MonitorAcquiredOffset()); } #if defined(TARGET_AMD64) // Record callee save registers. // Currently only needed for x64. // regMaskTP rsPushRegs = codeGen->regSet.rsGetModifiedRegsMask() & RBM_CALLEE_SAVED; rsPushRegs |= RBM_FPBASE; patchpointInfo->SetCalleeSaveRegisters((uint64_t)rsPushRegs); JITDUMP("--OSR-- Tier0 callee saves: "); JITDUMPEXEC(dspRegMask((regMaskTP)patchpointInfo->CalleeSaveRegisters())); JITDUMP("\n"); #endif // Register this with the runtime. info.compCompHnd->setPatchpointInfo(patchpointInfo); } //------------------------------------------------------------------------ // ResetOptAnnotations: Clear annotations produced during global optimizations. // // Notes: // The intent of this method is to clear any information typically assumed // to be set only once; it is used between iterations when JitOptRepeat is // in effect. void Compiler::ResetOptAnnotations() { assert(opts.optRepeat); assert(JitConfig.JitOptRepeatCount() > 0); fgResetForSsa(); vnStore = nullptr; m_opAsgnVarDefSsaNums = nullptr; m_blockToEHPreds = nullptr; fgSsaPassesCompleted = 0; fgVNPassesCompleted = 0; for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->Statements()) { for (GenTree* const tree : stmt->TreeList()) { tree->ClearVN(); tree->ClearAssertion(); tree->gtCSEnum = NO_CSE; } } } } //------------------------------------------------------------------------ // RecomputeLoopInfo: Recompute loop annotations between opt-repeat iterations. 
// // Notes: // The intent of this method is to update loop structure annotations, and those // they depend on; these annotations may have become stale during optimization, // and need to be up-to-date before running another iteration of optimizations. // void Compiler::RecomputeLoopInfo() { assert(opts.optRepeat); assert(JitConfig.JitOptRepeatCount() > 0); // Recompute reachability sets, dominators, and loops. optResetLoopInfo(); fgDomsComputed = false; fgComputeReachability(); optSetBlockWeights(); // Rebuild the loop tree annotations themselves optFindLoops(); } /*****************************************************************************/ void Compiler::ProcessShutdownWork(ICorStaticInfo* statInfo) { } /*****************************************************************************/ #ifdef DEBUG void* forceFrameJIT; // used to force to frame &useful for fastchecked debugging bool Compiler::skipMethod() { static ConfigMethodRange fJitRange; fJitRange.EnsureInit(JitConfig.JitRange()); assert(!fJitRange.Error()); // Normally JitConfig.JitRange() is null, we don't want to skip // jitting any methods. // // So, the logic below relies on the fact that a null range string // passed to ConfigMethodRange represents the set of all methods. if (!fJitRange.Contains(info.compMethodHash())) { return true; } if (JitConfig.JitExclude().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { return true; } if (!JitConfig.JitInclude().isEmpty() && !JitConfig.JitInclude().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { return true; } return false; } #endif /*****************************************************************************/ int Compiler::compCompile(CORINFO_MODULE_HANDLE classPtr, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags) { // compInit should have set these already. noway_assert(info.compMethodInfo != nullptr); noway_assert(info.compCompHnd != nullptr); noway_assert(info.compMethodHnd != nullptr); #ifdef FEATURE_JIT_METHOD_PERF static bool checkedForJitTimeLog = false; pCompJitTimer = nullptr; if (!checkedForJitTimeLog) { // Call into VM to get the config strings. FEATURE_JIT_METHOD_PERF is enabled for // retail builds. Do not call the regular Config helper here as it would pull // in a copy of the config parser into the clrjit.dll. InterlockedCompareExchangeT(&Compiler::compJitTimeLogFilename, (LPCWSTR)info.compCompHnd->getJitTimeLogFilename(), NULL); // At a process or module boundary clear the file and start afresh. JitTimer::PrintCsvHeader(); checkedForJitTimeLog = true; } if ((Compiler::compJitTimeLogFilename != nullptr) || (JitTimeLogCsv() != nullptr)) { pCompJitTimer = JitTimer::Create(this, info.compMethodInfo->ILCodeSize); } #endif // FEATURE_JIT_METHOD_PERF #ifdef DEBUG Compiler* me = this; forceFrameJIT = (void*)&me; // let us see the this pointer in fastchecked build // set this early so we can use it without relying on random memory values verbose = compIsForInlining() ? 
impInlineInfo->InlinerCompiler->verbose : false; #endif #if FUNC_INFO_LOGGING LPCWSTR tmpJitFuncInfoFilename = JitConfig.JitFuncInfoFile(); if (tmpJitFuncInfoFilename != nullptr) { LPCWSTR oldFuncInfoFileName = InterlockedCompareExchangeT(&compJitFuncInfoFilename, tmpJitFuncInfoFilename, NULL); if (oldFuncInfoFileName == nullptr) { assert(compJitFuncInfoFile == nullptr); compJitFuncInfoFile = _wfopen(compJitFuncInfoFilename, W("a")); if (compJitFuncInfoFile == nullptr) { #if defined(DEBUG) && !defined(HOST_UNIX) // no 'perror' in the PAL perror("Failed to open JitFuncInfoLogFile"); #endif // defined(DEBUG) && !defined(HOST_UNIX) } } } #endif // FUNC_INFO_LOGGING // if (s_compMethodsCount==0) setvbuf(jitstdout, NULL, _IONBF, 0); if (compIsForInlining()) { compileFlags->Clear(JitFlags::JIT_FLAG_OSR); info.compILEntry = 0; info.compPatchpointInfo = nullptr; } else if (compileFlags->IsSet(JitFlags::JIT_FLAG_OSR)) { // Fetch OSR info from the runtime info.compPatchpointInfo = info.compCompHnd->getOSRInfo(&info.compILEntry); assert(info.compPatchpointInfo != nullptr); } #if defined(TARGET_ARM64) compFrameInfo = {0}; #endif virtualStubParamInfo = new (this, CMK_Unknown) VirtualStubParamInfo(IsTargetAbi(CORINFO_CORERT_ABI)); // compMatchedVM is set to true if both CPU/ABI and OS are matching the execution engine requirements // // Do we have a matched VM? Or are we "abusing" the VM to help us do JIT work (such as using an x86 native VM // with an ARM-targeting "altjit"). // Match CPU/ABI for compMatchedVM info.compMatchedVM = IMAGE_FILE_MACHINE_TARGET == info.compCompHnd->getExpectedTargetArchitecture(); // Match OS for compMatchedVM CORINFO_EE_INFO* eeInfo = eeGetEEInfo(); #ifdef TARGET_OS_RUNTIMEDETERMINED noway_assert(TargetOS::OSSettingConfigured); #endif if (TargetOS::IsMacOS) { info.compMatchedVM = info.compMatchedVM && (eeInfo->osType == CORINFO_MACOS); } else if (TargetOS::IsUnix) { if (TargetArchitecture::IsX64) { // MacOS x64 uses the Unix jit variant in crossgen2, not a special jit info.compMatchedVM = info.compMatchedVM && ((eeInfo->osType == CORINFO_UNIX) || (eeInfo->osType == CORINFO_MACOS)); } else { info.compMatchedVM = info.compMatchedVM && (eeInfo->osType == CORINFO_UNIX); } } else if (TargetOS::IsWindows) { info.compMatchedVM = info.compMatchedVM && (eeInfo->osType == CORINFO_WINNT); } // If we are not compiling for a matched VM, then we are getting JIT flags that don't match our target // architecture. The two main examples here are an ARM targeting altjit hosted on x86 and an ARM64 // targeting altjit hosted on x64. (Though with cross-bitness work, the host doesn't necessarily need // to be of the same bitness.) In these cases, we need to fix up the JIT flags to be appropriate for // the target, as the VM's expected target may overlap bit flags with different meaning to our target. // Note that it might be better to do this immediately when setting the JIT flags in CILJit::compileMethod() // (when JitFlags::SetFromFlags() is called), but this is close enough. (To move this logic to // CILJit::compileMethod() would require moving the info.compMatchedVM computation there as well.) if (!info.compMatchedVM) { #if defined(TARGET_ARM) // Currently nothing needs to be done. There are no ARM flags that conflict with other flags. #endif // defined(TARGET_ARM) #if defined(TARGET_ARM64) // The x86/x64 architecture capabilities flags overlap with the ARM64 ones. Set a reasonable architecture // target default. 
Currently this is disabling all ARM64 architecture features except FP and SIMD, but this // should be altered to possibly enable all of them, when they are known to all work. CORINFO_InstructionSetFlags defaultArm64Flags; defaultArm64Flags.AddInstructionSet(InstructionSet_ArmBase); defaultArm64Flags.AddInstructionSet(InstructionSet_AdvSimd); defaultArm64Flags.Set64BitInstructionSetVariants(); compileFlags->SetInstructionSetFlags(defaultArm64Flags); #endif // defined(TARGET_ARM64) } compMaxUncheckedOffsetForNullObject = eeGetEEInfo()->maxUncheckedOffsetForNullObject; // Set the context for token lookup. if (compIsForInlining()) { impTokenLookupContextHandle = impInlineInfo->tokenLookupContextHandle; assert(impInlineInfo->inlineCandidateInfo->clsHandle == info.compCompHnd->getMethodClass(info.compMethodHnd)); info.compClassHnd = impInlineInfo->inlineCandidateInfo->clsHandle; assert(impInlineInfo->inlineCandidateInfo->clsAttr == info.compCompHnd->getClassAttribs(info.compClassHnd)); // printf("%x != %x\n", impInlineInfo->inlineCandidateInfo->clsAttr, // info.compCompHnd->getClassAttribs(info.compClassHnd)); info.compClassAttr = impInlineInfo->inlineCandidateInfo->clsAttr; } else { impTokenLookupContextHandle = METHOD_BEING_COMPILED_CONTEXT(); info.compClassHnd = info.compCompHnd->getMethodClass(info.compMethodHnd); info.compClassAttr = info.compCompHnd->getClassAttribs(info.compClassHnd); } #ifdef DEBUG if (JitConfig.EnableExtraSuperPmiQueries()) { // This call to getClassModule/getModuleAssembly/getAssemblyName fails in crossgen2 due to these // APIs being unimplemented. So disable this extra info for pre-jit mode. See // https://github.com/dotnet/runtime/issues/48888. // // Ditto for some of the class name queries for generic params. // if (!compileFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { // Get the assembly name, to aid finding any particular SuperPMI method context function (void)info.compCompHnd->getAssemblyName( info.compCompHnd->getModuleAssembly(info.compCompHnd->getClassModule(info.compClassHnd))); // Fetch class names for the method's generic parameters. // CORINFO_SIG_INFO sig; info.compCompHnd->getMethodSig(info.compMethodHnd, &sig, nullptr); const unsigned classInst = sig.sigInst.classInstCount; if (classInst > 0) { for (unsigned i = 0; i < classInst; i++) { eeGetClassName(sig.sigInst.classInst[i]); } } const unsigned methodInst = sig.sigInst.methInstCount; if (methodInst > 0) { for (unsigned i = 0; i < methodInst; i++) { eeGetClassName(sig.sigInst.methInst[i]); } } } } #endif // DEBUG info.compProfilerCallback = false; // Assume false until we are told to hook this method. #ifdef DEBUG if (!compIsForInlining()) { JitTls::GetLogEnv()->setCompiler(this); } // Have we been told to be more selective in our Jitting? 
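    // skipMethod() (DEBUG only, defined above) consults the JitRange, JitExclude and JitInclude
    // configuration values to decide whether to skip compiling this method entirely.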
if (skipMethod()) { if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLEE_MARKED_AS_SKIPPED); } return CORJIT_SKIPPED; } #endif // DEBUG // Set this before the first 'BADCODE' // Skip verification where possible assert(compileFlags->IsSet(JitFlags::JIT_FLAG_SKIP_VERIFICATION)); /* Setup an error trap */ struct Param { Compiler* pThis; CORINFO_MODULE_HANDLE classPtr; COMP_HANDLE compHnd; CORINFO_METHOD_INFO* methodInfo; void** methodCodePtr; uint32_t* methodCodeSize; JitFlags* compileFlags; int result; } param; param.pThis = this; param.classPtr = classPtr; param.compHnd = info.compCompHnd; param.methodInfo = info.compMethodInfo; param.methodCodePtr = methodCodePtr; param.methodCodeSize = methodCodeSize; param.compileFlags = compileFlags; param.result = CORJIT_INTERNALERROR; setErrorTrap(info.compCompHnd, Param*, pParam, &param) // ERROR TRAP: Start normal block { pParam->result = pParam->pThis->compCompileHelper(pParam->classPtr, pParam->compHnd, pParam->methodInfo, pParam->methodCodePtr, pParam->methodCodeSize, pParam->compileFlags); } finallyErrorTrap() // ERROR TRAP: The following block handles errors { /* Cleanup */ if (compIsForInlining()) { goto DoneCleanUp; } /* Tell the emitter that we're done with this function */ GetEmitter()->emitEndCG(); DoneCleanUp: compDone(); } endErrorTrap() // ERROR TRAP: End return param.result; } #if defined(DEBUG) || defined(INLINE_DATA) //------------------------------------------------------------------------ // compMethodHash: get hash code for currently jitted method // // Returns: // Hash based on method's full name // unsigned Compiler::Info::compMethodHash() const { if (compMethodHashPrivate == 0) { // compMethodHashPrivate = compCompHnd->getMethodHash(compMethodHnd); assert(compFullName != nullptr); assert(*compFullName != 0); COUNT_T hash = HashStringA(compFullName); // Use compFullName to generate the hash, as it contains the signature // and return type compMethodHashPrivate = hash; } return compMethodHashPrivate; } //------------------------------------------------------------------------ // compMethodHash: get hash code for specified method // // Arguments: // methodHnd - method of interest // // Returns: // Hash based on method's full name // unsigned Compiler::compMethodHash(CORINFO_METHOD_HANDLE methodHnd) { // If this is the root method, delegate to the caching version // if (methodHnd == info.compMethodHnd) { return info.compMethodHash(); } // Else compute from scratch. Might consider caching this too. 
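    // (Hash the callee's full name so the result is comparable with the root method's hash above;
    // fall back to the VM's getMethodHash() if the name is unavailable.)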
// unsigned methodHash = 0; const char* calleeName = eeGetMethodFullName(methodHnd); if (calleeName != nullptr) { methodHash = HashStringA(calleeName); } else { methodHash = info.compCompHnd->getMethodHash(methodHnd); } return methodHash; } #endif // defined(DEBUG) || defined(INLINE_DATA) void Compiler::compCompileFinish() { #if defined(DEBUG) || MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || DISPLAY_SIZES || CALL_ARG_STATS genMethodCnt++; #endif #if MEASURE_MEM_ALLOC { compArenaAllocator->finishMemStats(); memAllocHist.record((unsigned)((compArenaAllocator->getTotalBytesAllocated() + 1023) / 1024)); memUsedHist.record((unsigned)((compArenaAllocator->getTotalBytesUsed() + 1023) / 1024)); } #ifdef DEBUG if (s_dspMemStats || verbose) { printf("\nAllocations for %s (MethodHash=%08x)\n", info.compFullName, info.compMethodHash()); compArenaAllocator->dumpMemStats(jitstdout); } #endif // DEBUG #endif // MEASURE_MEM_ALLOC #if LOOP_HOIST_STATS AddLoopHoistStats(); #endif // LOOP_HOIST_STATS #if MEASURE_NODE_SIZE genTreeNcntHist.record(static_cast<unsigned>(genNodeSizeStatsPerFunc.genTreeNodeCnt)); genTreeNsizHist.record(static_cast<unsigned>(genNodeSizeStatsPerFunc.genTreeNodeSize)); #endif #if defined(DEBUG) // Small methods should fit in ArenaAllocator::getDefaultPageSize(), or else // we should bump up ArenaAllocator::getDefaultPageSize() if ((info.compILCodeSize <= 32) && // Is it a reasonably small method? (info.compNativeCodeSize < 512) && // Some trivial methods generate huge native code. eg. pushing a single huge // struct (impInlinedCodeSize <= 128) && // Is the the inlining reasonably bounded? // Small methods cannot meaningfully have a big number of locals // or arguments. We always track arguments at the start of // the prolog which requires memory (info.compLocalsCount <= 32) && (!opts.MinOpts()) && // We may have too many local variables, etc (getJitStressLevel() == 0) && // We need extra memory for stress !opts.optRepeat && // We need extra memory to repeat opts !compArenaAllocator->bypassHostAllocator() && // ArenaAllocator::getDefaultPageSize() is artificially low for // DirectAlloc // Factor of 2x is because data-structures are bigger under DEBUG (compArenaAllocator->getTotalBytesAllocated() > (2 * ArenaAllocator::getDefaultPageSize())) && // RyuJIT backend needs memory tuning! TODO-Cleanup: remove this case when memory tuning is complete. (compArenaAllocator->getTotalBytesAllocated() > (10 * ArenaAllocator::getDefaultPageSize())) && !verbose) // We allocate lots of memory to convert sets to strings for JitDump { genSmallMethodsNeedingExtraMemoryCnt++; // Less than 1% of all methods should run into this. // We cannot be more strict as there are always degenerate cases where we // would need extra memory (like huge structs as locals - see lvaSetStruct()). 
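        // The assert below encodes that expectation: once at least 500 methods have been compiled,
        // fewer than 1% of them (genMethodCnt / 100) may have needed this extra memory.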
assert((genMethodCnt < 500) || (genSmallMethodsNeedingExtraMemoryCnt < (genMethodCnt / 100))); } #endif // DEBUG #if defined(DEBUG) || defined(INLINE_DATA) m_inlineStrategy->DumpData(); if (JitConfig.JitInlineDumpXmlFile() != nullptr) { FILE* file = _wfopen(JitConfig.JitInlineDumpXmlFile(), W("a")); if (file != nullptr) { m_inlineStrategy->DumpXml(file); fclose(file); } else { m_inlineStrategy->DumpXml(); } } else { m_inlineStrategy->DumpXml(); } #endif #ifdef DEBUG if (opts.dspOrder) { // mdMethodDef __stdcall CEEInfo::getMethodDefFromMethod(CORINFO_METHOD_HANDLE hMethod) mdMethodDef currentMethodToken = info.compCompHnd->getMethodDefFromMethod(info.compMethodHnd); static bool headerPrinted = false; if (!headerPrinted) { // clang-format off headerPrinted = true; printf(" | Profiled | Method | Method has | calls | Num |LclV |AProp| CSE | Perf |bytes | %3s codesize| \n", Target::g_tgtCPUName); printf(" mdToken | CNT | RGN | Hash | EH | FRM | LOOP | NRM | IND | BBs | Cnt | Cnt | Cnt | Score | IL | HOT | CLD | method name \n"); printf("---------+------+------+----------+----+-----+------+-----+-----+-----+-----+-----+-----+---------+------+-------+-----+\n"); // 06001234 | 1234 | HOT | 0f1e2d3c | EH | ebp | LOOP | 15 | 6 | 12 | 17 | 12 | 8 | 1234.56 | 145 | 1234 | 123 | System.Example(int) // clang-format on } printf("%08X | ", currentMethodToken); if (fgHaveProfileData()) { if (fgCalledCount < 1000) { printf("%4.0f | ", fgCalledCount); } else if (fgCalledCount < 1000000) { printf("%3.0fK | ", fgCalledCount / 1000); } else { printf("%3.0fM | ", fgCalledCount / 1000000); } } else { printf(" | "); } CorInfoRegionKind regionKind = info.compMethodInfo->regionKind; if (opts.altJit) { printf("ALT | "); } else if (regionKind == CORINFO_REGION_NONE) { printf(" | "); } else if (regionKind == CORINFO_REGION_HOT) { printf(" HOT | "); } else if (regionKind == CORINFO_REGION_COLD) { printf("COLD | "); } else if (regionKind == CORINFO_REGION_JIT) { printf(" JIT | "); } else { printf("UNKN | "); } printf("%08x | ", info.compMethodHash()); if (compHndBBtabCount > 0) { printf("EH | "); } else { printf(" | "); } if (rpFrameType == FT_EBP_FRAME) { printf("%3s | ", STR_FPBASE); } else if (rpFrameType == FT_ESP_FRAME) { printf("%3s | ", STR_SPBASE); } #if DOUBLE_ALIGN else if (rpFrameType == FT_DOUBLE_ALIGN_FRAME) { printf("dbl | "); } #endif else // (rpFrameType == FT_NOT_SET) { printf("??? 
| "); } if (fgHasLoops) { printf("LOOP |"); } else { printf(" |"); } printf(" %3d |", optCallCount); printf(" %3d |", optIndirectCallCount); printf(" %3d |", fgBBcountAtCodegen); printf(" %3d |", lvaCount); if (opts.MinOpts()) { printf(" MinOpts |"); } else { printf(" %3d |", optAssertionCount); printf(" %3d |", optCSEcount); } if (info.compPerfScore < 9999.995) { printf(" %7.2f |", info.compPerfScore); } else { printf(" %7.0f |", info.compPerfScore); } printf(" %4d |", info.compMethodInfo->ILCodeSize); printf(" %5d |", info.compTotalHotCodeSize); printf(" %3d |", info.compTotalColdCodeSize); printf(" %s\n", eeGetMethodFullName(info.compMethodHnd)); printf(""); // in our logic this causes a flush } if (verbose) { printf("****** DONE compiling %s\n", info.compFullName); printf(""); // in our logic this causes a flush } #if TRACK_ENREG_STATS for (unsigned i = 0; i < lvaCount; ++i) { const LclVarDsc* varDsc = lvaGetDesc(i); if (varDsc->lvRefCnt() != 0) { s_enregisterStats.RecordLocal(varDsc); } } #endif // TRACK_ENREG_STATS // Only call _DbgBreakCheck when we are jitting, not when we are ngen-ing // For ngen the int3 or breakpoint instruction will be right at the // start of the ngen method and we will stop when we execute it. // if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { if (compJitHaltMethod()) { #if !defined(HOST_UNIX) // TODO-UNIX: re-enable this when we have an OS that supports a pop-up dialog // Don't do an assert, but just put up the dialog box so we get just-in-time debugger // launching. When you hit 'retry' it will continue and naturally stop at the INT 3 // that the JIT put in the code _DbgBreakCheck(__FILE__, __LINE__, "JitHalt"); #endif } } #endif // DEBUG } #ifdef PSEUDORANDOM_NOP_INSERTION // this is zlib adler32 checksum. source came from windows base #define BASE 65521L // largest prime smaller than 65536 #define NMAX 5552 // NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 #define DO1(buf, i) \ { \ s1 += buf[i]; \ s2 += s1; \ } #define DO2(buf, i) \ DO1(buf, i); \ DO1(buf, i + 1); #define DO4(buf, i) \ DO2(buf, i); \ DO2(buf, i + 2); #define DO8(buf, i) \ DO4(buf, i); \ DO4(buf, i + 4); #define DO16(buf) \ DO8(buf, 0); \ DO8(buf, 8); unsigned adler32(unsigned adler, char* buf, unsigned int len) { unsigned int s1 = adler & 0xffff; unsigned int s2 = (adler >> 16) & 0xffff; int k; if (buf == NULL) return 1L; while (len > 0) { k = len < NMAX ? 
len : NMAX; len -= k; while (k >= 16) { DO16(buf); buf += 16; k -= 16; } if (k != 0) do { s1 += *buf++; s2 += s1; } while (--k); s1 %= BASE; s2 %= BASE; } return (s2 << 16) | s1; } #endif unsigned getMethodBodyChecksum(_In_z_ char* code, int size) { #ifdef PSEUDORANDOM_NOP_INSERTION return adler32(0, code, size); #else return 0; #endif } int Compiler::compCompileHelper(CORINFO_MODULE_HANDLE classPtr, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags) { CORINFO_METHOD_HANDLE methodHnd = info.compMethodHnd; info.compCode = methodInfo->ILCode; info.compILCodeSize = methodInfo->ILCodeSize; info.compILImportSize = 0; if (info.compILCodeSize == 0) { BADCODE("code size is zero"); } if (compIsForInlining()) { #ifdef DEBUG unsigned methAttr_Old = impInlineInfo->inlineCandidateInfo->methAttr; unsigned methAttr_New = info.compCompHnd->getMethodAttribs(info.compMethodHnd); unsigned flagsToIgnore = CORINFO_FLG_DONT_INLINE | CORINFO_FLG_FORCEINLINE; assert((methAttr_Old & (~flagsToIgnore)) == (methAttr_New & (~flagsToIgnore))); #endif info.compFlags = impInlineInfo->inlineCandidateInfo->methAttr; compInlineContext = impInlineInfo->inlineContext; } else { info.compFlags = info.compCompHnd->getMethodAttribs(info.compMethodHnd); #ifdef PSEUDORANDOM_NOP_INSERTION info.compChecksum = getMethodBodyChecksum((char*)methodInfo->ILCode, methodInfo->ILCodeSize); #endif compInlineContext = m_inlineStrategy->GetRootContext(); } compSwitchedToOptimized = false; compSwitchedToMinOpts = false; // compInitOptions will set the correct verbose flag. compInitOptions(compileFlags); if (!compIsForInlining() && !opts.altJit && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALT_JIT)) { // We're an altjit, but the COMPlus_AltJit configuration did not say to compile this method, // so skip it. return CORJIT_SKIPPED; } #ifdef DEBUG if (verbose) { printf("IL to import:\n"); dumpILRange(info.compCode, info.compILCodeSize); } #endif // Check for COMPlus_AggressiveInlining if (JitConfig.JitAggressiveInlining()) { compDoAggressiveInlining = true; } if (compDoAggressiveInlining) { info.compFlags |= CORINFO_FLG_FORCEINLINE; } #ifdef DEBUG // Check for ForceInline stress. 
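// When this stress mode fires, the method is marked CORINFO_FLG_FORCEINLINE, the same flag
// that is set for the COMPlus_AggressiveInlining path above.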
if (compStressCompile(STRESS_FORCE_INLINE, 0)) { info.compFlags |= CORINFO_FLG_FORCEINLINE; } if (compIsForInlining()) { JITLOG((LL_INFO100000, "\nINLINER impTokenLookupContextHandle for %s is 0x%p.\n", eeGetMethodFullName(info.compMethodHnd), dspPtr(impTokenLookupContextHandle))); } #endif // DEBUG impCanReimport = compStressCompile(STRESS_CHK_REIMPORT, 15); /* Initialize set a bunch of global values */ info.compScopeHnd = classPtr; info.compXcptnsCount = methodInfo->EHcount; info.compMaxStack = methodInfo->maxStack; compHndBBtab = nullptr; compHndBBtabCount = 0; compHndBBtabAllocCount = 0; info.compNativeCodeSize = 0; info.compTotalHotCodeSize = 0; info.compTotalColdCodeSize = 0; info.compClassProbeCount = 0; compHasBackwardJump = false; compHasBackwardJumpInHandler = false; #ifdef DEBUG compCurBB = nullptr; lvaTable = nullptr; // Reset node and block ID counter compGenTreeID = 0; compStatementID = 0; compBasicBlockID = 0; #endif /* Initialize emitter */ if (!compIsForInlining()) { codeGen->GetEmitter()->emitBegCG(this, compHnd); } info.compIsStatic = (info.compFlags & CORINFO_FLG_STATIC) != 0; info.compPublishStubParam = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PUBLISH_SECRET_PARAM); info.compHasNextCallRetAddr = false; if (opts.IsReversePInvoke()) { bool unused; info.compCallConv = info.compCompHnd->getUnmanagedCallConv(methodInfo->ftn, nullptr, &unused); info.compArgOrder = Target::g_tgtUnmanagedArgOrder; } else { info.compCallConv = CorInfoCallConvExtension::Managed; info.compArgOrder = Target::g_tgtArgOrder; } info.compIsVarArgs = false; switch (methodInfo->args.getCallConv()) { case CORINFO_CALLCONV_NATIVEVARARG: case CORINFO_CALLCONV_VARARG: info.compIsVarArgs = true; break; default: break; } info.compRetNativeType = info.compRetType = JITtype2varType(methodInfo->args.retType); info.compUnmanagedCallCountWithGCTransition = 0; info.compLvFrameListRoot = BAD_VAR_NUM; info.compInitMem = ((methodInfo->options & CORINFO_OPT_INIT_LOCALS) != 0); /* Allocate the local variable table */ lvaInitTypeRef(); compInitDebuggingInfo(); // If are an altjit and have patchpoint info, we might need to tweak the frame size // so it's plausible for the altjit architecture. // if (!info.compMatchedVM && compileFlags->IsSet(JitFlags::JIT_FLAG_OSR)) { assert(info.compLocalsCount == info.compPatchpointInfo->NumberOfLocals()); const int totalFrameSize = info.compPatchpointInfo->TotalFrameSize(); int frameSizeUpdate = 0; #if defined(TARGET_AMD64) if ((totalFrameSize % 16) != 8) { frameSizeUpdate = 8; } #elif defined(TARGET_ARM64) if ((totalFrameSize % 16) != 0) { frameSizeUpdate = 8; } #endif if (frameSizeUpdate != 0) { JITDUMP("Mismatched altjit + OSR -- updating tier0 frame size from %d to %d\n", totalFrameSize, totalFrameSize + frameSizeUpdate); // Allocate a local copy with altered frame size. // const unsigned patchpointInfoSize = PatchpointInfo::ComputeSize(info.compLocalsCount); PatchpointInfo* const newInfo = (PatchpointInfo*)getAllocator(CMK_Unknown).allocate<char>(patchpointInfoSize); newInfo->Initialize(info.compLocalsCount, totalFrameSize + frameSizeUpdate); newInfo->Copy(info.compPatchpointInfo); // Swap it in place. // info.compPatchpointInfo = newInfo; } } #ifdef DEBUG if (compIsForInlining()) { compBasicBlockID = impInlineInfo->InlinerCompiler->compBasicBlockID; } #endif const bool forceInline = !!(info.compFlags & CORINFO_FLG_FORCEINLINE); if (!compIsForInlining() && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { // We're prejitting the root method. 
We also will analyze it as // a potential inline candidate. InlineResult prejitResult(this, methodHnd, "prejit"); // Profile data allows us to avoid early "too many IL bytes" outs. prejitResult.NoteBool(InlineObservation::CALLSITE_HAS_PROFILE, fgHaveSufficientProfileData()); // Do the initial inline screen. impCanInlineIL(methodHnd, methodInfo, forceInline, &prejitResult); // Temporarily install the prejitResult as the // compInlineResult so it's available to fgFindJumpTargets // and can accumulate more observations as the IL is // scanned. // // We don't pass prejitResult in as a parameter to avoid // potential aliasing confusion -- the other call to // fgFindBasicBlocks may have set up compInlineResult and // the code in fgFindJumpTargets references that data // member extensively. assert(compInlineResult == nullptr); assert(impInlineInfo == nullptr); compInlineResult = &prejitResult; // Find the basic blocks. We must do this regardless of // inlineability, since we are prejitting this method. // // This will also update the status of this method as // an inline candidate. fgFindBasicBlocks(); // Undo the temporary setup. assert(compInlineResult == &prejitResult); compInlineResult = nullptr; // If still a viable, discretionary inline, assess // profitability. if (prejitResult.IsDiscretionaryCandidate()) { prejitResult.DetermineProfitability(methodInfo); } m_inlineStrategy->NotePrejitDecision(prejitResult); // Handle the results of the inline analysis. if (prejitResult.IsFailure()) { // This method is a bad inlinee according to our // analysis. We will let the InlineResult destructor // mark it as noinline in the prejit image to save the // jit some work. // // This decision better not be context-dependent. assert(prejitResult.IsNever()); } else { // This looks like a viable inline candidate. Since // we're not actually inlining, don't report anything. prejitResult.SetReported(); } } else { // We are jitting the root method, or inlining. fgFindBasicBlocks(); // If we are doing OSR, update flow to initially reach the appropriate IL offset. // if (opts.IsOSR()) { fgFixEntryFlowForOSR(); } } // If we're inlining and the candidate is bad, bail out. if (compDonotInline()) { goto _Next; } // We may decide to optimize this method, // to avoid spending a long time stuck in Tier0 code. // if (fgCanSwitchToOptimized()) { // We only expect to be able to do this at Tier0. // assert(opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0)); // Normal tiering should bail us out of Tier0 tail call induced loops. // So keep these methods in Tier0 if we're gathering PGO data. // If we're not gathering PGO, then switch these to optimized to // minimize the number of tail call helper stubs we might need. // Reconsider this if/when we're able to share those stubs. // // Honor the config setting that tells the jit to // always optimize methods with loops. // // If neither of those apply, and OSR is enabled, the jit may still // decide to optimize, if there's something in the method that // OSR currently cannot handle, or we're optionally suppressing // OSR by method hash. 
// const char* reason = nullptr; if (compTailPrefixSeen && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)) { reason = "tail.call and not BBINSTR"; } else if (compHasBackwardJump && ((info.compFlags & CORINFO_FLG_DISABLE_TIER0_FOR_LOOPS) != 0)) { reason = "loop"; } if (compHasBackwardJump && (reason == nullptr) && (JitConfig.TC_OnStackReplacement() > 0)) { const char* noPatchpointReason = nullptr; bool canEscapeViaOSR = compCanHavePatchpoints(&reason); #ifdef DEBUG if (canEscapeViaOSR) { // Optionally disable OSR by method hash. This will force any // method that might otherwise get trapped in Tier0 to be optimized. // static ConfigMethodRange JitEnableOsrRange; JitEnableOsrRange.EnsureInit(JitConfig.JitEnableOsrRange()); const unsigned hash = impInlineRoot()->info.compMethodHash(); if (!JitEnableOsrRange.Contains(hash)) { canEscapeViaOSR = false; reason = "OSR disabled by JitEnableOsrRange"; } } #endif if (canEscapeViaOSR) { JITDUMP("\nOSR enabled for this method\n"); } else { JITDUMP("\nOSR disabled for this method: %s\n", noPatchpointReason); assert(reason != nullptr); } } if (reason != nullptr) { fgSwitchToOptimized(reason); } } compSetOptimizationLevel(); #if COUNT_BASIC_BLOCKS bbCntTable.record(fgBBcount); if (fgBBcount == 1) { bbOneBBSizeTable.record(methodInfo->ILCodeSize); } #endif // COUNT_BASIC_BLOCKS #ifdef DEBUG if (verbose) { printf("Basic block list for '%s'\n", info.compFullName); fgDispBasicBlocks(); } #endif #ifdef DEBUG /* Give the function a unique number */ if (opts.disAsm || verbose) { compMethodID = ~info.compMethodHash() & 0xffff; } else { compMethodID = InterlockedIncrement(&s_compMethodsCount); } #endif if (compIsForInlining()) { compInlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_BASIC_BLOCKS, fgBBcount); if (compInlineResult->IsFailure()) { goto _Next; } } #ifdef DEBUG if ((JitConfig.DumpJittedMethods() == 1) && !compIsForInlining()) { enum { BUFSIZE = 20 }; char osrBuffer[BUFSIZE] = {0}; if (opts.IsOSR()) { // Tiering name already includes "OSR", we just want the IL offset // sprintf_s(osrBuffer, BUFSIZE, " @0x%x", info.compILEntry); } printf("Compiling %4d %s::%s, IL size = %u, hash=0x%08x %s%s%s\n", Compiler::jitTotalMethodCompiled, info.compClassName, info.compMethodName, info.compILCodeSize, info.compMethodHash(), compGetTieringName(), osrBuffer, compGetStressMessage()); } if (compIsForInlining()) { compGenTreeID = impInlineInfo->InlinerCompiler->compGenTreeID; compStatementID = impInlineInfo->InlinerCompiler->compStatementID; } #endif compCompile(methodCodePtr, methodCodeSize, compileFlags); #ifdef DEBUG if (compIsForInlining()) { impInlineInfo->InlinerCompiler->compGenTreeID = compGenTreeID; impInlineInfo->InlinerCompiler->compStatementID = compStatementID; impInlineInfo->InlinerCompiler->compBasicBlockID = compBasicBlockID; } #endif _Next: if (compDonotInline()) { // Verify we have only one inline result in play. assert(impInlineInfo->inlineResult == compInlineResult); } if (!compIsForInlining()) { compCompileFinish(); // Did we just compile for a target architecture that the VM isn't expecting? If so, the VM // can't used the generated code (and we better be an AltJit!). if (!info.compMatchedVM) { return CORJIT_SKIPPED; } #ifdef DEBUG if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALT_JIT) && JitConfig.RunAltJitCode() == 0) { return CORJIT_SKIPPED; } #endif // DEBUG } /* Success! 
*/ return CORJIT_OK; } //------------------------------------------------------------------------ // compFindLocalVarLinear: Linear search for variable's scope containing offset. // // Arguments: // varNum The variable number to search for in the array of scopes. // offs The offset value which should occur within the life of the variable. // // Return Value: // VarScopeDsc* of a matching variable that contains the offset within its life // begin and life end, or nullptr when there is no match found. // // Description: // Linear search for matching variables with their life begin and end containing // the offset, or NULL if one couldn't be found. // // Note: // Usually called for scope count = 4. Could be called for values up to 8. // VarScopeDsc* Compiler::compFindLocalVarLinear(unsigned varNum, unsigned offs) { for (unsigned i = 0; i < info.compVarScopesCount; i++) { VarScopeDsc* dsc = &info.compVarScopes[i]; if ((dsc->vsdVarNum == varNum) && (dsc->vsdLifeBeg <= offs) && (dsc->vsdLifeEnd > offs)) { return dsc; } } return nullptr; } //------------------------------------------------------------------------ // compFindLocalVar: Search for variable's scope containing offset. // // Arguments: // varNum The variable number to search for in the array of scopes. // offs The offset value which should occur within the life of the variable. // // Return Value: // VarScopeDsc* of a matching variable that contains the offset within its life // begin and life end, or NULL if one couldn't be found. // // Description: // Linear search for matching variables with their life begin and end containing // the offset only when the scope count is < MAX_LINEAR_FIND_LCL_SCOPELIST, // else use the hashtable lookup. // VarScopeDsc* Compiler::compFindLocalVar(unsigned varNum, unsigned offs) { if (info.compVarScopesCount < MAX_LINEAR_FIND_LCL_SCOPELIST) { return compFindLocalVarLinear(varNum, offs); } else { VarScopeDsc* ret = compFindLocalVar(varNum, offs, offs); assert(ret == compFindLocalVarLinear(varNum, offs)); return ret; } } //------------------------------------------------------------------------ // compFindLocalVar: Search for variable's scope containing offset. // // Arguments: // varNum The variable number to search for in the array of scopes. // lifeBeg The life begin of the variable's scope // lifeEnd The life end of the variable's scope // // Return Value: // VarScopeDsc* of a matching variable that contains the offset within its life // begin and life end, or NULL if one couldn't be found. // // Description: // Following are the steps used: // 1. Index into the hashtable using varNum. // 2. Iterate through the linked list at index varNum to find a matching // var scope. // VarScopeDsc* Compiler::compFindLocalVar(unsigned varNum, unsigned lifeBeg, unsigned lifeEnd) { assert(compVarScopeMap != nullptr); VarScopeMapInfo* info; if (compVarScopeMap->Lookup(varNum, &info)) { VarScopeListNode* list = info->head; while (list != nullptr) { if ((list->data->vsdLifeBeg <= lifeBeg) && (list->data->vsdLifeEnd > lifeEnd)) { return list->data; } list = list->next; } } return nullptr; } //------------------------------------------------------------------------- // compInitVarScopeMap: Create a scope map so it can be looked up by varNum // // Description: // Map.K => Map.V :: varNum => List(ScopeDsc) // // Create a scope map that can be indexed by varNum and can be iterated // on its values to look for matching scope when given an offs or // lifeBeg and lifeEnd. // // Notes: // 1.
Build the map only when we think linear search is slow, i.e., // MAX_LINEAR_FIND_LCL_SCOPELIST is large. // 2. Linked list preserves original array order. // void Compiler::compInitVarScopeMap() { if (info.compVarScopesCount < MAX_LINEAR_FIND_LCL_SCOPELIST) { return; } assert(compVarScopeMap == nullptr); compVarScopeMap = new (getAllocator()) VarNumToScopeDscMap(getAllocator()); // 599 prime to limit huge allocations; for ex: duplicated scopes on single var. compVarScopeMap->Reallocate(min(info.compVarScopesCount, 599)); for (unsigned i = 0; i < info.compVarScopesCount; ++i) { unsigned varNum = info.compVarScopes[i].vsdVarNum; VarScopeListNode* node = VarScopeListNode::Create(&info.compVarScopes[i], getAllocator()); // Index by varNum and if the list exists append "node" to the "list". VarScopeMapInfo* info; if (compVarScopeMap->Lookup(varNum, &info)) { info->tail->next = node; info->tail = node; } // Create a new list. else { info = VarScopeMapInfo::Create(node, getAllocator()); compVarScopeMap->Set(varNum, info); } } } struct genCmpLocalVarLifeBeg { bool operator()(const VarScopeDsc* elem1, const VarScopeDsc* elem2) { return elem1->vsdLifeBeg < elem2->vsdLifeBeg; } }; struct genCmpLocalVarLifeEnd { bool operator()(const VarScopeDsc* elem1, const VarScopeDsc* elem2) { return elem1->vsdLifeEnd < elem2->vsdLifeEnd; } }; inline void Compiler::compInitScopeLists() { if (info.compVarScopesCount == 0) { compEnterScopeList = compExitScopeList = nullptr; return; } // Populate the 'compEnterScopeList' and 'compExitScopeList' lists compEnterScopeList = new (this, CMK_DebugInfo) VarScopeDsc*[info.compVarScopesCount]; compExitScopeList = new (this, CMK_DebugInfo) VarScopeDsc*[info.compVarScopesCount]; for (unsigned i = 0; i < info.compVarScopesCount; i++) { compEnterScopeList[i] = compExitScopeList[i] = &info.compVarScopes[i]; } jitstd::sort(compEnterScopeList, compEnterScopeList + info.compVarScopesCount, genCmpLocalVarLifeBeg()); jitstd::sort(compExitScopeList, compExitScopeList + info.compVarScopesCount, genCmpLocalVarLifeEnd()); } void Compiler::compResetScopeLists() { if (info.compVarScopesCount == 0) { return; } assert(compEnterScopeList && compExitScopeList); compNextEnterScope = compNextExitScope = 0; } VarScopeDsc* Compiler::compGetNextEnterScope(unsigned offs, bool scan) { assert(info.compVarScopesCount); assert(compEnterScopeList && compExitScopeList); if (compNextEnterScope < info.compVarScopesCount) { assert(compEnterScopeList[compNextEnterScope]); unsigned nextEnterOff = compEnterScopeList[compNextEnterScope]->vsdLifeBeg; assert(scan || (offs <= nextEnterOff)); if (!scan) { if (offs == nextEnterOff) { return compEnterScopeList[compNextEnterScope++]; } } else { if (nextEnterOff <= offs) { return compEnterScopeList[compNextEnterScope++]; } } } return nullptr; } VarScopeDsc* Compiler::compGetNextExitScope(unsigned offs, bool scan) { assert(info.compVarScopesCount); assert(compEnterScopeList && compExitScopeList); if (compNextExitScope < info.compVarScopesCount) { assert(compExitScopeList[compNextExitScope]); unsigned nextExitOffs = compExitScopeList[compNextExitScope]->vsdLifeEnd; assert(scan || (offs <= nextExitOffs)); if (!scan) { if (offs == nextExitOffs) { return compExitScopeList[compNextExitScope++]; } } else { if (nextExitOffs <= offs) { return compExitScopeList[compNextExitScope++]; } } } return nullptr; } // The function will call the callback functions for scopes with boundaries // at instrs from the current status of the scope lists to 'offset', // ordered by instrs. 
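// Note that compInitScopeLists sorts the enter list by vsdLifeBeg and the exit list by
// vsdLifeEnd, which is what allows the loop below to walk both lists in lockstep and
// fire the enter/exit callbacks in instruction order.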
void Compiler::compProcessScopesUntil(unsigned offset, VARSET_TP* inScope, void (Compiler::*enterScopeFn)(VARSET_TP* inScope, VarScopeDsc*), void (Compiler::*exitScopeFn)(VARSET_TP* inScope, VarScopeDsc*)) { assert(offset != BAD_IL_OFFSET); assert(inScope != nullptr); bool foundExit = false, foundEnter = true; VarScopeDsc* scope; VarScopeDsc* nextExitScope = nullptr; VarScopeDsc* nextEnterScope = nullptr; unsigned offs = offset, curEnterOffs = 0; goto START_FINDING_SCOPES; // We need to determine the scopes which are open for the current block. // This loop walks over the missing blocks between the current and the // previous block, keeping the enter and exit offsets in lockstep. do { foundExit = foundEnter = false; if (nextExitScope) { (this->*exitScopeFn)(inScope, nextExitScope); nextExitScope = nullptr; foundExit = true; } offs = nextEnterScope ? nextEnterScope->vsdLifeBeg : offset; while ((scope = compGetNextExitScope(offs, true)) != nullptr) { foundExit = true; if (!nextEnterScope || scope->vsdLifeEnd > nextEnterScope->vsdLifeBeg) { // We overshot the last found Enter scope. Save the scope for later // and find an entering scope nextExitScope = scope; break; } (this->*exitScopeFn)(inScope, scope); } if (nextEnterScope) { (this->*enterScopeFn)(inScope, nextEnterScope); curEnterOffs = nextEnterScope->vsdLifeBeg; nextEnterScope = nullptr; foundEnter = true; } offs = nextExitScope ? nextExitScope->vsdLifeEnd : offset; START_FINDING_SCOPES: while ((scope = compGetNextEnterScope(offs, true)) != nullptr) { foundEnter = true; if ((nextExitScope && scope->vsdLifeBeg >= nextExitScope->vsdLifeEnd) || (scope->vsdLifeBeg > curEnterOffs)) { // We overshot the last found exit scope. Save the scope for later // and find an exiting scope nextEnterScope = scope; break; } (this->*enterScopeFn)(inScope, scope); if (!nextExitScope) { curEnterOffs = scope->vsdLifeBeg; } } } while (foundExit || foundEnter); } #if defined(DEBUG) void Compiler::compDispScopeLists() { unsigned i; printf("Local variable scopes = %d\n", info.compVarScopesCount); if (info.compVarScopesCount) { printf(" \tVarNum \tLVNum \t Name \tBeg \tEnd\n"); } printf("Sorted by enter scope:\n"); for (i = 0; i < info.compVarScopesCount; i++) { VarScopeDsc* varScope = compEnterScopeList[i]; assert(varScope); printf("%2d: \t%02Xh \t%02Xh \t%10s \t%03Xh \t%03Xh", i, varScope->vsdVarNum, varScope->vsdLVnum, VarNameToStr(varScope->vsdName) == nullptr ? "UNKNOWN" : VarNameToStr(varScope->vsdName), varScope->vsdLifeBeg, varScope->vsdLifeEnd); if (compNextEnterScope == i) { printf(" <-- next enter scope"); } printf("\n"); } printf("Sorted by exit scope:\n"); for (i = 0; i < info.compVarScopesCount; i++) { VarScopeDsc* varScope = compExitScopeList[i]; assert(varScope); printf("%2d: \t%02Xh \t%02Xh \t%10s \t%03Xh \t%03Xh", i, varScope->vsdVarNum, varScope->vsdLVnum, VarNameToStr(varScope->vsdName) == nullptr ? "UNKNOWN" : VarNameToStr(varScope->vsdName), varScope->vsdLifeBeg, varScope->vsdLifeEnd); if (compNextExitScope == i) { printf(" <-- next exit scope"); } printf("\n"); } } void Compiler::compDispLocalVars() { printf("info.compVarScopesCount = %d\n", info.compVarScopesCount); if (info.compVarScopesCount > 0) { printf(" \tVarNum \tLVNum \t Name \tBeg \tEnd\n"); } for (unsigned i = 0; i < info.compVarScopesCount; i++) { VarScopeDsc* varScope = &info.compVarScopes[i]; printf("%2d: \t%02Xh \t%02Xh \t%10s \t%03Xh \t%03Xh\n", i, varScope->vsdVarNum, varScope->vsdLVnum, VarNameToStr(varScope->vsdName) == nullptr ? 
"UNKNOWN" : VarNameToStr(varScope->vsdName), varScope->vsdLifeBeg, varScope->vsdLifeEnd); } } #endif // DEBUG /*****************************************************************************/ #if MEASURE_CLRAPI_CALLS struct WrapICorJitInfo : public ICorJitInfo { //------------------------------------------------------------------------ // WrapICorJitInfo::makeOne: allocate an instance of WrapICorJitInfo // // Arguments: // alloc - the allocator to get memory from for the instance // compile - the compiler instance // compHndRef - the ICorJitInfo handle from the EE; the caller's // copy may be replaced with a "wrapper" instance // // Return Value: // If the config flags indicate that ICorJitInfo should be wrapped, // we return the "wrapper" instance; otherwise we return "nullptr". static WrapICorJitInfo* makeOne(ArenaAllocator* alloc, Compiler* compiler, COMP_HANDLE& compHndRef /* INOUT */) { WrapICorJitInfo* wrap = nullptr; if (JitConfig.JitEECallTimingInfo() != 0) { // It's too early to use the default allocator, so we do this // in two steps to be safe (the constructor doesn't need to do // anything except fill in the vtable pointer, so we let the // compiler do it). void* inst = alloc->allocateMemory(roundUp(sizeof(WrapICorJitInfo))); if (inst != nullptr) { // If you get a build error here due to 'WrapICorJitInfo' being // an abstract class, it's very likely that the wrapper bodies // in ICorJitInfo_API_wrapper.hpp are no longer in sync with // the EE interface; please be kind and update the header file. wrap = new (inst, jitstd::placement_t()) WrapICorJitInfo(); wrap->wrapComp = compiler; // Save the real handle and replace it with our wrapped version. wrap->wrapHnd = compHndRef; compHndRef = wrap; } } return wrap; } private: Compiler* wrapComp; COMP_HANDLE wrapHnd; // the "real thing" public: #include "ICorJitInfo_API_wrapper.hpp" }; #endif // MEASURE_CLRAPI_CALLS /*****************************************************************************/ // Compile a single method int jitNativeCode(CORINFO_METHOD_HANDLE methodHnd, CORINFO_MODULE_HANDLE classPtr, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags, void* inlineInfoPtr) { // // A non-NULL inlineInfo means we are compiling the inlinee method. // InlineInfo* inlineInfo = (InlineInfo*)inlineInfoPtr; bool jitFallbackCompile = false; START: int result = CORJIT_INTERNALERROR; ArenaAllocator* pAlloc = nullptr; ArenaAllocator alloc; #if MEASURE_CLRAPI_CALLS WrapICorJitInfo* wrapCLR = nullptr; #endif if (inlineInfo) { // Use inliner's memory allocator when compiling the inlinee. 
pAlloc = inlineInfo->InlinerCompiler->compGetArenaAllocator(); } else { pAlloc = &alloc; } Compiler* pComp; pComp = nullptr; struct Param { Compiler* pComp; ArenaAllocator* pAlloc; bool jitFallbackCompile; CORINFO_METHOD_HANDLE methodHnd; CORINFO_MODULE_HANDLE classPtr; COMP_HANDLE compHnd; CORINFO_METHOD_INFO* methodInfo; void** methodCodePtr; uint32_t* methodCodeSize; JitFlags* compileFlags; InlineInfo* inlineInfo; #if MEASURE_CLRAPI_CALLS WrapICorJitInfo* wrapCLR; #endif int result; } param; param.pComp = nullptr; param.pAlloc = pAlloc; param.jitFallbackCompile = jitFallbackCompile; param.methodHnd = methodHnd; param.classPtr = classPtr; param.compHnd = compHnd; param.methodInfo = methodInfo; param.methodCodePtr = methodCodePtr; param.methodCodeSize = methodCodeSize; param.compileFlags = compileFlags; param.inlineInfo = inlineInfo; #if MEASURE_CLRAPI_CALLS param.wrapCLR = nullptr; #endif param.result = result; setErrorTrap(compHnd, Param*, pParamOuter, &param) { setErrorTrap(nullptr, Param*, pParam, pParamOuter) { if (pParam->inlineInfo) { // Lazily create the inlinee compiler object if (pParam->inlineInfo->InlinerCompiler->InlineeCompiler == nullptr) { pParam->inlineInfo->InlinerCompiler->InlineeCompiler = (Compiler*)pParam->pAlloc->allocateMemory(roundUp(sizeof(*pParam->pComp))); } // Use the inlinee compiler object pParam->pComp = pParam->inlineInfo->InlinerCompiler->InlineeCompiler; #ifdef DEBUG // memset(pParam->pComp, 0xEE, sizeof(Compiler)); #endif } else { // Allocate create the inliner compiler object pParam->pComp = (Compiler*)pParam->pAlloc->allocateMemory(roundUp(sizeof(*pParam->pComp))); } #if MEASURE_CLRAPI_CALLS pParam->wrapCLR = WrapICorJitInfo::makeOne(pParam->pAlloc, pParam->pComp, pParam->compHnd); #endif // push this compiler on the stack (TLS) pParam->pComp->prevCompiler = JitTls::GetCompiler(); JitTls::SetCompiler(pParam->pComp); // PREFIX_ASSUME gets turned into ASSERT_CHECK and we cannot have it here #if defined(_PREFAST_) || defined(_PREFIX_) PREFIX_ASSUME(pParam->pComp != NULL); #else assert(pParam->pComp != nullptr); #endif pParam->pComp->compInit(pParam->pAlloc, pParam->methodHnd, pParam->compHnd, pParam->methodInfo, pParam->inlineInfo); #ifdef DEBUG pParam->pComp->jitFallbackCompile = pParam->jitFallbackCompile; #endif // Now generate the code pParam->result = pParam->pComp->compCompile(pParam->classPtr, pParam->methodCodePtr, pParam->methodCodeSize, pParam->compileFlags); } finallyErrorTrap() { Compiler* pCompiler = pParamOuter->pComp; // If OOM is thrown when allocating memory for a pComp, we will end up here. // For this case, pComp and also pCompiler will be a nullptr // if (pCompiler != nullptr) { pCompiler->info.compCode = nullptr; // pop the compiler off the TLS stack only if it was linked above assert(JitTls::GetCompiler() == pCompiler); JitTls::SetCompiler(pCompiler->prevCompiler); } if (pParamOuter->inlineInfo == nullptr) { // Free up the allocator we were using pParamOuter->pAlloc->destroy(); } } endErrorTrap() } impJitErrorTrap() { // If we were looking at an inlinee.... if (inlineInfo != nullptr) { // Note that we failed to compile the inlinee, and that // there's no point trying to inline it again anywhere else. 
inlineInfo->inlineResult->NoteFatal(InlineObservation::CALLEE_COMPILATION_ERROR); } param.result = __errc; } endErrorTrap() result = param.result; if (!inlineInfo && (result == CORJIT_INTERNALERROR || result == CORJIT_RECOVERABLEERROR || result == CORJIT_IMPLLIMITATION) && !jitFallbackCompile) { // If we failed the JIT, reattempt with debuggable code. jitFallbackCompile = true; // Update the flags for 'safer' code generation. compileFlags->Set(JitFlags::JIT_FLAG_MIN_OPT); compileFlags->Clear(JitFlags::JIT_FLAG_SIZE_OPT); compileFlags->Clear(JitFlags::JIT_FLAG_SPEED_OPT); goto START; } return result; } #if defined(UNIX_AMD64_ABI) // GetTypeFromClassificationAndSizes: // Returns the type of the eightbyte accounting for the classification and size of the eightbyte. // // args: // classType: classification type // size: size of the eightbyte. // // static var_types Compiler::GetTypeFromClassificationAndSizes(SystemVClassificationType classType, int size) { var_types type = TYP_UNKNOWN; switch (classType) { case SystemVClassificationTypeInteger: if (size == 1) { type = TYP_BYTE; } else if (size <= 2) { type = TYP_SHORT; } else if (size <= 4) { type = TYP_INT; } else if (size <= 8) { type = TYP_LONG; } else { assert(false && "GetTypeFromClassificationAndSizes Invalid Integer classification type."); } break; case SystemVClassificationTypeIntegerReference: type = TYP_REF; break; case SystemVClassificationTypeIntegerByRef: type = TYP_BYREF; break; case SystemVClassificationTypeSSE: if (size <= 4) { type = TYP_FLOAT; } else if (size <= 8) { type = TYP_DOUBLE; } else { assert(false && "GetTypeFromClassificationAndSizes Invalid SSE classification type."); } break; default: assert(false && "GetTypeFromClassificationAndSizes Invalid classification type."); break; } return type; } //------------------------------------------------------------------- // GetEightByteType: Returns the type of eightbyte slot of a struct // // Arguments: // structDesc - struct classification description. // slotNum - eightbyte slot number for the struct. // // Return Value: // type of the eightbyte slot of the struct // // static var_types Compiler::GetEightByteType(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, unsigned slotNum) { var_types eightByteType = TYP_UNDEF; unsigned len = structDesc.eightByteSizes[slotNum]; switch (structDesc.eightByteClassifications[slotNum]) { case SystemVClassificationTypeInteger: // See typelist.h for jit type definition. // All the types of size < 4 bytes are of jit type TYP_INT. 
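// Sizes of 5 through 8 bytes become TYP_LONG; a larger size for a single eightbyte is
// malformed and asserts.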
if (structDesc.eightByteSizes[slotNum] <= 4) { eightByteType = TYP_INT; } else if (structDesc.eightByteSizes[slotNum] <= 8) { eightByteType = TYP_LONG; } else { assert(false && "GetEightByteType Invalid Integer classification type."); } break; case SystemVClassificationTypeIntegerReference: assert(len == REGSIZE_BYTES); eightByteType = TYP_REF; break; case SystemVClassificationTypeIntegerByRef: assert(len == REGSIZE_BYTES); eightByteType = TYP_BYREF; break; case SystemVClassificationTypeSSE: if (structDesc.eightByteSizes[slotNum] <= 4) { eightByteType = TYP_FLOAT; } else if (structDesc.eightByteSizes[slotNum] <= 8) { eightByteType = TYP_DOUBLE; } else { assert(false && "GetEightByteType Invalid SSE classification type."); } break; default: assert(false && "GetEightByteType Invalid classification type."); break; } return eightByteType; } //------------------------------------------------------------------------------------------------------ // GetStructTypeOffset: Gets the type, size and offset of the eightbytes of a struct for System V systems. // // Arguments: // 'structDesc' - struct description // 'type0' - out param; returns the type of the first eightbyte. // 'type1' - out param; returns the type of the second eightbyte. // 'offset0' - out param; returns the offset of the first eightbyte. // 'offset1' - out param; returns the offset of the second eightbyte. // // static void Compiler::GetStructTypeOffset(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, var_types* type0, var_types* type1, unsigned __int8* offset0, unsigned __int8* offset1) { *offset0 = structDesc.eightByteOffsets[0]; *offset1 = structDesc.eightByteOffsets[1]; *type0 = TYP_UNKNOWN; *type1 = TYP_UNKNOWN; // Set the first eightbyte data if (structDesc.eightByteCount >= 1) { *type0 = GetEightByteType(structDesc, 0); } // Set the second eight byte data if (structDesc.eightByteCount == 2) { *type1 = GetEightByteType(structDesc, 1); } } //------------------------------------------------------------------------------------------------------ // GetStructTypeOffset: Gets the type, size and offset of the eightbytes of a struct for System V systems. // // Arguments: // 'typeHnd' - type handle // 'type0' - out param; returns the type of the first eightbyte. // 'type1' - out param; returns the type of the second eightbyte. // 'offset0' - out param; returns the offset of the first eightbyte. // 'offset1' - out param; returns the offset of the second eightbyte. // void Compiler::GetStructTypeOffset(CORINFO_CLASS_HANDLE typeHnd, var_types* type0, var_types* type1, unsigned __int8* offset0, unsigned __int8* offset1) { SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; eeGetSystemVAmd64PassStructInRegisterDescriptor(typeHnd, &structDesc); assert(structDesc.passedInRegisters); GetStructTypeOffset(structDesc, type0, type1, offset0, offset1); } #endif // defined(UNIX_AMD64_ABI) /*****************************************************************************/ /*****************************************************************************/ #ifdef DEBUG Compiler::NodeToIntMap* Compiler::FindReachableNodesInNodeTestData() { NodeToIntMap* reachable = new (getAllocatorDebugOnly()) NodeToIntMap(getAllocatorDebugOnly()); if (m_nodeTestData == nullptr) { return reachable; } // Otherwise, iterate. 
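// Walk every tree of every statement in every block, recording any node (or matching
// late call argument) that has test data attached.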
for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->NonPhiStatements()) { for (GenTree* const tree : stmt->TreeList()) { TestLabelAndNum tlAndN; // For call nodes, translate late args to what they stand for. if (tree->OperGet() == GT_CALL) { GenTreeCall* call = tree->AsCall(); unsigned i = 0; for (GenTreeCall::Use& use : call->Args()) { if ((use.GetNode()->gtFlags & GTF_LATE_ARG) != 0) { // Find the corresponding late arg. GenTree* lateArg = call->fgArgInfo->GetArgNode(i); if (GetNodeTestData()->Lookup(lateArg, &tlAndN)) { reachable->Set(lateArg, 0); } } i++; } } if (GetNodeTestData()->Lookup(tree, &tlAndN)) { reachable->Set(tree, 0); } } } } return reachable; } void Compiler::TransferTestDataToNode(GenTree* from, GenTree* to) { TestLabelAndNum tlAndN; // We can't currently associate multiple annotations with a single node. // If we need to, we can fix this... // If the table is null, don't create it just to do the lookup, which would fail... if (m_nodeTestData != nullptr && GetNodeTestData()->Lookup(from, &tlAndN)) { assert(!GetNodeTestData()->Lookup(to, &tlAndN)); // We can't currently associate multiple annotations with a single node. // If we need to, we can fix this... TestLabelAndNum tlAndNTo; assert(!GetNodeTestData()->Lookup(to, &tlAndNTo)); GetNodeTestData()->Remove(from); GetNodeTestData()->Set(to, tlAndN); } } #endif // DEBUG /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX jvc XX XX XX XX Functions for the stand-alone version of the JIT . XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ void codeGeneratorCodeSizeBeg() { } /***************************************************************************** * * Used for counting pointer assignments. 
*/ /*****************************************************************************/ void codeGeneratorCodeSizeEnd() { } /***************************************************************************** * * Gather statistics - mainly used for the standalone * Enable various #ifdef's to get the information you need */ void Compiler::compJitStats() { #if CALL_ARG_STATS /* Method types and argument statistics */ compCallArgStats(); #endif // CALL_ARG_STATS } #if CALL_ARG_STATS /***************************************************************************** * * Gather statistics about method calls and arguments */ void Compiler::compCallArgStats() { unsigned argNum; unsigned argDWordNum; unsigned argLngNum; unsigned argFltNum; unsigned argDblNum; unsigned regArgNum; unsigned regArgDeferred; unsigned regArgTemp; unsigned regArgLclVar; unsigned regArgConst; unsigned argTempsThisMethod = 0; assert(fgStmtListThreaded); for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->Statements()) { for (GenTree* const call : stmt->TreeList()) { if (call->gtOper != GT_CALL) continue; argNum = regArgNum = regArgDeferred = regArgTemp = regArgConst = regArgLclVar = argDWordNum = argLngNum = argFltNum = argDblNum = 0; argTotalCalls++; if (call->AsCall()->gtCallThisArg == nullptr) { if (call->AsCall()->gtCallType == CT_HELPER) { argHelperCalls++; } else { argStaticCalls++; } } else { /* We have a 'this' pointer */ argDWordNum++; argNum++; regArgNum++; regArgDeferred++; argTotalObjPtr++; if (call->AsCall()->IsVirtual()) { /* virtual function */ argVirtualCalls++; } else { argNonVirtualCalls++; } } } } } argTempsCntTable.record(argTempsThisMethod); if (argMaxTempsPerMethod < argTempsThisMethod) { argMaxTempsPerMethod = argTempsThisMethod; } } /* static */ void Compiler::compDispCallArgStats(FILE* fout) { if (argTotalCalls == 0) return; fprintf(fout, "\n"); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Call stats\n"); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Total # of calls = %d, calls / method = %.3f\n\n", argTotalCalls, (float)argTotalCalls / genMethodCnt); fprintf(fout, "Percentage of helper calls = %4.2f %%\n", (float)(100 * argHelperCalls) / argTotalCalls); fprintf(fout, "Percentage of static calls = %4.2f %%\n", (float)(100 * argStaticCalls) / argTotalCalls); fprintf(fout, "Percentage of virtual calls = %4.2f %%\n", (float)(100 * argVirtualCalls) / argTotalCalls); fprintf(fout, "Percentage of non-virtual calls = %4.2f %%\n\n", (float)(100 * argNonVirtualCalls) / argTotalCalls); fprintf(fout, "Average # of arguments per call = %.2f%%\n\n", (float)argTotalArgs / argTotalCalls); fprintf(fout, "Percentage of DWORD arguments = %.2f %%\n", (float)(100 * argTotalDWordArgs) / argTotalArgs); fprintf(fout, "Percentage of LONG arguments = %.2f %%\n", (float)(100 * argTotalLongArgs) / argTotalArgs); fprintf(fout, "Percentage of FLOAT arguments = %.2f %%\n", (float)(100 * argTotalFloatArgs) / argTotalArgs); fprintf(fout, "Percentage of DOUBLE arguments = %.2f %%\n\n", (float)(100 * argTotalDoubleArgs) / argTotalArgs); if (argTotalRegArgs == 0) return; /* fprintf(fout, "Total deferred arguments = %d \n", argTotalDeferred); fprintf(fout, "Total temp arguments = %d \n\n", argTotalTemps); fprintf(fout, "Total 'this' arguments = %d \n", argTotalObjPtr); fprintf(fout, "Total local var arguments = %d \n", argTotalLclVar); fprintf(fout, "Total constant arguments = %d \n\n", argTotalConst); */ fprintf(fout, "\nRegister 
Arguments:\n\n"); fprintf(fout, "Percentage of deferred arguments = %.2f %%\n", (float)(100 * argTotalDeferred) / argTotalRegArgs); fprintf(fout, "Percentage of temp arguments = %.2f %%\n\n", (float)(100 * argTotalTemps) / argTotalRegArgs); fprintf(fout, "Maximum # of temps per method = %d\n\n", argMaxTempsPerMethod); fprintf(fout, "Percentage of ObjPtr arguments = %.2f %%\n", (float)(100 * argTotalObjPtr) / argTotalRegArgs); // fprintf(fout, "Percentage of global arguments = %.2f %%\n", (float)(100 * argTotalDWordGlobEf) / // argTotalRegArgs); fprintf(fout, "Percentage of constant arguments = %.2f %%\n", (float)(100 * argTotalConst) / argTotalRegArgs); fprintf(fout, "Percentage of lcl var arguments = %.2f %%\n\n", (float)(100 * argTotalLclVar) / argTotalRegArgs); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Argument count frequency table (includes ObjPtr):\n"); fprintf(fout, "--------------------------------------------------\n"); argCntTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "DWORD argument count frequency table (w/o LONG):\n"); fprintf(fout, "--------------------------------------------------\n"); argDWordCntTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Temps count frequency table (per method):\n"); fprintf(fout, "--------------------------------------------------\n"); argTempsCntTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); /* fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "DWORD argument count frequency table (w/ LONG):\n"); fprintf(fout, "--------------------------------------------------\n"); argDWordLngCntTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); */ } #endif // CALL_ARG_STATS // JIT time end to end, and by phases. 
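// The timing support below is compiled in only when FEATURE_JIT_METHOD_PERF is defined;
// EE call timing additionally requires MEASURE_CLRAPI_CALLS.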
#ifdef FEATURE_JIT_METHOD_PERF // Static variables CritSecObject CompTimeSummaryInfo::s_compTimeSummaryLock; CompTimeSummaryInfo CompTimeSummaryInfo::s_compTimeSummary; #if MEASURE_CLRAPI_CALLS double JitTimer::s_cyclesPerSec = CachedCyclesPerSecond(); #endif #endif // FEATURE_JIT_METHOD_PERF #if defined(FEATURE_JIT_METHOD_PERF) || DUMP_FLOWGRAPHS || defined(FEATURE_TRACELOGGING) const char* PhaseNames[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) string_nm, #include "compphases.h" }; const char* PhaseEnums[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) #enum_nm, #include "compphases.h" }; const LPCWSTR PhaseShortNames[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) W(short_nm), #include "compphases.h" }; #endif // defined(FEATURE_JIT_METHOD_PERF) || DUMP_FLOWGRAPHS #ifdef FEATURE_JIT_METHOD_PERF bool PhaseHasChildren[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) hasChildren, #include "compphases.h" }; int PhaseParent[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) parent, #include "compphases.h" }; bool PhaseReportsIRSize[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) measureIR, #include "compphases.h" }; CompTimeInfo::CompTimeInfo(unsigned byteCodeBytes) : m_byteCodeBytes(byteCodeBytes) , m_totalCycles(0) , m_parentPhaseEndSlop(0) , m_timerFailure(false) #if MEASURE_CLRAPI_CALLS , m_allClrAPIcalls(0) , m_allClrAPIcycles(0) #endif { for (int i = 0; i < PHASE_NUMBER_OF; i++) { m_invokesByPhase[i] = 0; m_cyclesByPhase[i] = 0; #if MEASURE_CLRAPI_CALLS m_CLRinvokesByPhase[i] = 0; m_CLRcyclesByPhase[i] = 0; #endif } #if MEASURE_CLRAPI_CALLS assert(ArrLen(m_perClrAPIcalls) == API_ICorJitInfo_Names::API_COUNT); assert(ArrLen(m_perClrAPIcycles) == API_ICorJitInfo_Names::API_COUNT); assert(ArrLen(m_maxClrAPIcycles) == API_ICorJitInfo_Names::API_COUNT); for (int i = 0; i < API_ICorJitInfo_Names::API_COUNT; i++) { m_perClrAPIcalls[i] = 0; m_perClrAPIcycles[i] = 0; m_maxClrAPIcycles[i] = 0; } #endif } bool CompTimeSummaryInfo::IncludedInFilteredData(CompTimeInfo& info) { return false; // info.m_byteCodeBytes < 10; } //------------------------------------------------------------------------ // CompTimeSummaryInfo::AddInfo: Record timing info from one compile. // // Arguments: // info - The timing information to record. // includePhases - If "true", the per-phase info in "info" is valid, // which means that a "normal" compile has ended; if // the value is "false" we are recording the results // of a partial compile (typically an import-only run // on behalf of the inliner) in which case the phase // info is not valid and so we only record EE call // overhead. void CompTimeSummaryInfo::AddInfo(CompTimeInfo& info, bool includePhases) { if (info.m_timerFailure) { return; // Don't update if there was a failure. } CritSecHolder timeLock(s_compTimeSummaryLock); if (includePhases) { bool includeInFiltered = IncludedInFilteredData(info); m_numMethods++; // Update the totals and maxima. m_total.m_byteCodeBytes += info.m_byteCodeBytes; m_maximum.m_byteCodeBytes = max(m_maximum.m_byteCodeBytes, info.m_byteCodeBytes); m_total.m_totalCycles += info.m_totalCycles; m_maximum.m_totalCycles = max(m_maximum.m_totalCycles, info.m_totalCycles); #if MEASURE_CLRAPI_CALLS // Update the CLR-API values. 
m_total.m_allClrAPIcalls += info.m_allClrAPIcalls; m_maximum.m_allClrAPIcalls = max(m_maximum.m_allClrAPIcalls, info.m_allClrAPIcalls); m_total.m_allClrAPIcycles += info.m_allClrAPIcycles; m_maximum.m_allClrAPIcycles = max(m_maximum.m_allClrAPIcycles, info.m_allClrAPIcycles); #endif if (includeInFiltered) { m_numFilteredMethods++; m_filtered.m_byteCodeBytes += info.m_byteCodeBytes; m_filtered.m_totalCycles += info.m_totalCycles; m_filtered.m_parentPhaseEndSlop += info.m_parentPhaseEndSlop; } for (int i = 0; i < PHASE_NUMBER_OF; i++) { m_total.m_invokesByPhase[i] += info.m_invokesByPhase[i]; m_total.m_cyclesByPhase[i] += info.m_cyclesByPhase[i]; #if MEASURE_CLRAPI_CALLS m_total.m_CLRinvokesByPhase[i] += info.m_CLRinvokesByPhase[i]; m_total.m_CLRcyclesByPhase[i] += info.m_CLRcyclesByPhase[i]; #endif if (includeInFiltered) { m_filtered.m_invokesByPhase[i] += info.m_invokesByPhase[i]; m_filtered.m_cyclesByPhase[i] += info.m_cyclesByPhase[i]; #if MEASURE_CLRAPI_CALLS m_filtered.m_CLRinvokesByPhase[i] += info.m_CLRinvokesByPhase[i]; m_filtered.m_CLRcyclesByPhase[i] += info.m_CLRcyclesByPhase[i]; #endif } m_maximum.m_cyclesByPhase[i] = max(m_maximum.m_cyclesByPhase[i], info.m_cyclesByPhase[i]); #if MEASURE_CLRAPI_CALLS m_maximum.m_CLRcyclesByPhase[i] = max(m_maximum.m_CLRcyclesByPhase[i], info.m_CLRcyclesByPhase[i]); #endif } m_total.m_parentPhaseEndSlop += info.m_parentPhaseEndSlop; m_maximum.m_parentPhaseEndSlop = max(m_maximum.m_parentPhaseEndSlop, info.m_parentPhaseEndSlop); } #if MEASURE_CLRAPI_CALLS else { m_totMethods++; // Update the "global" CLR-API values. m_total.m_allClrAPIcalls += info.m_allClrAPIcalls; m_maximum.m_allClrAPIcalls = max(m_maximum.m_allClrAPIcalls, info.m_allClrAPIcalls); m_total.m_allClrAPIcycles += info.m_allClrAPIcycles; m_maximum.m_allClrAPIcycles = max(m_maximum.m_allClrAPIcycles, info.m_allClrAPIcycles); // Update the per-phase CLR-API values. m_total.m_invokesByPhase[PHASE_CLR_API] += info.m_allClrAPIcalls; m_maximum.m_invokesByPhase[PHASE_CLR_API] = max(m_maximum.m_perClrAPIcalls[PHASE_CLR_API], info.m_allClrAPIcalls); m_total.m_cyclesByPhase[PHASE_CLR_API] += info.m_allClrAPIcycles; m_maximum.m_cyclesByPhase[PHASE_CLR_API] = max(m_maximum.m_cyclesByPhase[PHASE_CLR_API], info.m_allClrAPIcycles); } for (int i = 0; i < API_ICorJitInfo_Names::API_COUNT; i++) { m_total.m_perClrAPIcalls[i] += info.m_perClrAPIcalls[i]; m_maximum.m_perClrAPIcalls[i] = max(m_maximum.m_perClrAPIcalls[i], info.m_perClrAPIcalls[i]); m_total.m_perClrAPIcycles[i] += info.m_perClrAPIcycles[i]; m_maximum.m_perClrAPIcycles[i] = max(m_maximum.m_perClrAPIcycles[i], info.m_perClrAPIcycles[i]); m_maximum.m_maxClrAPIcycles[i] = max(m_maximum.m_maxClrAPIcycles[i], info.m_maxClrAPIcycles[i]); } #endif } // Static LPCWSTR Compiler::compJitTimeLogFilename = nullptr; void CompTimeSummaryInfo::Print(FILE* f) { if (f == nullptr) { return; } // Otherwise... 
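// Convert cycle counts to wall-clock time using the cached processor frequency; if no
// high-frequency timer is available there is nothing useful to print.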
double countsPerSec = CachedCyclesPerSecond(); if (countsPerSec == 0.0) { fprintf(f, "Processor does not have a high-frequency timer.\n"); return; } double totTime_ms = 0.0; fprintf(f, "JIT Compilation time report:\n"); fprintf(f, " Compiled %d methods.\n", m_numMethods); if (m_numMethods != 0) { fprintf(f, " Compiled %d bytecodes total (%d max, %8.2f avg).\n", m_total.m_byteCodeBytes, m_maximum.m_byteCodeBytes, (double)m_total.m_byteCodeBytes / (double)m_numMethods); totTime_ms = ((double)m_total.m_totalCycles / countsPerSec) * 1000.0; fprintf(f, " Time: total: %10.3f Mcycles/%10.3f ms\n", ((double)m_total.m_totalCycles / 1000000.0), totTime_ms); fprintf(f, " max: %10.3f Mcycles/%10.3f ms\n", ((double)m_maximum.m_totalCycles) / 1000000.0, ((double)m_maximum.m_totalCycles / countsPerSec) * 1000.0); fprintf(f, " avg: %10.3f Mcycles/%10.3f ms\n", ((double)m_total.m_totalCycles) / 1000000.0 / (double)m_numMethods, totTime_ms / (double)m_numMethods); const char* extraHdr1 = ""; const char* extraHdr2 = ""; #if MEASURE_CLRAPI_CALLS bool extraInfo = (JitConfig.JitEECallTimingInfo() != 0); if (extraInfo) { extraHdr1 = " CLRs/meth % in CLR"; extraHdr2 = "-----------------------"; } #endif fprintf(f, "\n Total time by phases:\n"); fprintf(f, " PHASE inv/meth Mcycles time (ms) %% of total max (ms)%s\n", extraHdr1); fprintf(f, " ---------------------------------------------------------------------------------------%s\n", extraHdr2); // Ensure that at least the names array and the Phases enum have the same number of entries: assert(ArrLen(PhaseNames) == PHASE_NUMBER_OF); for (int i = 0; i < PHASE_NUMBER_OF; i++) { double phase_tot_ms = (((double)m_total.m_cyclesByPhase[i]) / countsPerSec) * 1000.0; double phase_max_ms = (((double)m_maximum.m_cyclesByPhase[i]) / countsPerSec) * 1000.0; #if MEASURE_CLRAPI_CALLS // Skip showing CLR API call info if we didn't collect any if (i == PHASE_CLR_API && !extraInfo) continue; #endif // Indent nested phases, according to depth. 
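// (one fixed-width indent per ancestor in the phase tree)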
int ancPhase = PhaseParent[i]; while (ancPhase != -1) { fprintf(f, " "); ancPhase = PhaseParent[ancPhase]; } fprintf(f, " %-30s %6.2f %10.2f %9.3f %8.2f%% %8.3f", PhaseNames[i], ((double)m_total.m_invokesByPhase[i]) / ((double)m_numMethods), ((double)m_total.m_cyclesByPhase[i]) / 1000000.0, phase_tot_ms, (phase_tot_ms * 100.0 / totTime_ms), phase_max_ms); #if MEASURE_CLRAPI_CALLS if (extraInfo && i != PHASE_CLR_API) { double nest_tot_ms = (((double)m_total.m_CLRcyclesByPhase[i]) / countsPerSec) * 1000.0; double nest_percent = nest_tot_ms * 100.0 / totTime_ms; double calls_per_fn = ((double)m_total.m_CLRinvokesByPhase[i]) / ((double)m_numMethods); if (nest_percent > 0.1 || calls_per_fn > 10) fprintf(f, " %5.1f %8.2f%%", calls_per_fn, nest_percent); } #endif fprintf(f, "\n"); } // Show slop if it's over a certain percentage of the total double pslop_pct = 100.0 * m_total.m_parentPhaseEndSlop * 1000.0 / countsPerSec / totTime_ms; if (pslop_pct >= 1.0) { fprintf(f, "\n 'End phase slop' should be very small (if not, there's unattributed time): %9.3f Mcycles = " "%3.1f%% of total.\n\n", m_total.m_parentPhaseEndSlop / 1000000.0, pslop_pct); } } if (m_numFilteredMethods > 0) { fprintf(f, " Compiled %d methods that meet the filter requirement.\n", m_numFilteredMethods); fprintf(f, " Compiled %d bytecodes total (%8.2f avg).\n", m_filtered.m_byteCodeBytes, (double)m_filtered.m_byteCodeBytes / (double)m_numFilteredMethods); double totTime_ms = ((double)m_filtered.m_totalCycles / countsPerSec) * 1000.0; fprintf(f, " Time: total: %10.3f Mcycles/%10.3f ms\n", ((double)m_filtered.m_totalCycles / 1000000.0), totTime_ms); fprintf(f, " avg: %10.3f Mcycles/%10.3f ms\n", ((double)m_filtered.m_totalCycles) / 1000000.0 / (double)m_numFilteredMethods, totTime_ms / (double)m_numFilteredMethods); fprintf(f, " Total time by phases:\n"); fprintf(f, " PHASE inv/meth Mcycles time (ms) %% of total\n"); fprintf(f, " --------------------------------------------------------------------------------------\n"); // Ensure that at least the names array and the Phases enum have the same number of entries: assert(ArrLen(PhaseNames) == PHASE_NUMBER_OF); for (int i = 0; i < PHASE_NUMBER_OF; i++) { double phase_tot_ms = (((double)m_filtered.m_cyclesByPhase[i]) / countsPerSec) * 1000.0; // Indent nested phases, according to depth. 
int ancPhase = PhaseParent[i]; while (ancPhase != -1) { fprintf(f, " "); ancPhase = PhaseParent[ancPhase]; } fprintf(f, " %-30s %5.2f %10.2f %9.3f %8.2f%%\n", PhaseNames[i], ((double)m_filtered.m_invokesByPhase[i]) / ((double)m_numFilteredMethods), ((double)m_filtered.m_cyclesByPhase[i]) / 1000000.0, phase_tot_ms, (phase_tot_ms * 100.0 / totTime_ms)); } double fslop_ms = m_filtered.m_parentPhaseEndSlop * 1000.0 / countsPerSec; if (fslop_ms > 1.0) { fprintf(f, "\n 'End phase slop' should be very small (if not, there's unattributed time): %9.3f Mcycles = " "%3.1f%% of total.\n\n", m_filtered.m_parentPhaseEndSlop / 1000000.0, fslop_ms); } } #if MEASURE_CLRAPI_CALLS if (m_total.m_allClrAPIcalls > 0 && m_total.m_allClrAPIcycles > 0) { fprintf(f, "\n"); if (m_totMethods > 0) fprintf(f, " Imported %u methods.\n\n", m_numMethods + m_totMethods); fprintf(f, " CLR API # calls total time max time avg time %% " "of total\n"); fprintf(f, " -------------------------------------------------------------------------------"); fprintf(f, "---------------------\n"); static const char* APInames[] = { #define DEF_CLR_API(name) #name, #include "ICorJitInfo_API_names.h" }; unsigned shownCalls = 0; double shownMillis = 0.0; #ifdef DEBUG unsigned checkedCalls = 0; double checkedMillis = 0.0; #endif for (unsigned pass = 0; pass < 2; pass++) { for (unsigned i = 0; i < API_ICorJitInfo_Names::API_COUNT; i++) { unsigned calls = m_total.m_perClrAPIcalls[i]; if (calls == 0) continue; unsigned __int64 cycles = m_total.m_perClrAPIcycles[i]; double millis = 1000.0 * cycles / countsPerSec; // Don't show the small fry to keep the results manageable if (millis < 0.5) { // We always show the following API because it is always called // exactly once for each method and its body is the simplest one // possible (it just returns an integer constant), and therefore // it can be used to measure the overhead of adding the CLR API // timing code. Roughly speaking, on a 3GHz x64 box the overhead // per call should be around 40 ns when using RDTSC, compared to // about 140 ns when using GetThreadCycles() under Windows. if (i != API_ICorJitInfo_Names::API_getExpectedTargetArchitecture) continue; } // In the first pass we just compute the totals. 
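// The second pass then prints one row per API and expresses each entry as a share of
// that total.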
if (pass == 0) { shownCalls += m_total.m_perClrAPIcalls[i]; shownMillis += millis; continue; } unsigned __int32 maxcyc = m_maximum.m_maxClrAPIcycles[i]; double max_ms = 1000.0 * maxcyc / countsPerSec; fprintf(f, " %-40s", APInames[i]); // API name fprintf(f, " %8u %9.1f ms", calls, millis); // #calls, total time fprintf(f, " %8.1f ms %8.1f ns", max_ms, 1000000.0 * millis / calls); // max, avg time fprintf(f, " %5.1f%%\n", 100.0 * millis / shownMillis); // % of total #ifdef DEBUG checkedCalls += m_total.m_perClrAPIcalls[i]; checkedMillis += millis; #endif } } #ifdef DEBUG assert(checkedCalls == shownCalls); assert(checkedMillis == shownMillis); #endif if (shownCalls > 0 || shownMillis > 0) { fprintf(f, " -------------------------"); fprintf(f, "---------------------------------------------------------------------------\n"); fprintf(f, " Total for calls shown above %8u %10.1f ms", shownCalls, shownMillis); if (totTime_ms > 0.0) fprintf(f, " (%4.1lf%% of overall JIT time)", shownMillis * 100.0 / totTime_ms); fprintf(f, "\n"); } fprintf(f, "\n"); } #endif fprintf(f, "\n"); } JitTimer::JitTimer(unsigned byteCodeSize) : m_info(byteCodeSize) { #if MEASURE_CLRAPI_CALLS m_CLRcallInvokes = 0; m_CLRcallCycles = 0; #endif #ifdef DEBUG m_lastPhase = (Phases)-1; #if MEASURE_CLRAPI_CALLS m_CLRcallAPInum = -1; #endif #endif unsigned __int64 threadCurCycles; if (_our_GetThreadCycles(&threadCurCycles)) { m_start = threadCurCycles; m_curPhaseStart = threadCurCycles; } } void JitTimer::EndPhase(Compiler* compiler, Phases phase) { // Otherwise... // We re-run some phases currently, so this following assert doesn't work. // assert((int)phase > (int)m_lastPhase); // We should end phases in increasing order. unsigned __int64 threadCurCycles; if (_our_GetThreadCycles(&threadCurCycles)) { unsigned __int64 phaseCycles = (threadCurCycles - m_curPhaseStart); // If this is not a leaf phase, the assumption is that the last subphase must have just recently ended. // Credit the duration to "slop", the total of which should be very small. if (PhaseHasChildren[phase]) { m_info.m_parentPhaseEndSlop += phaseCycles; } else { // It is a leaf phase. Credit duration to it. m_info.m_invokesByPhase[phase]++; m_info.m_cyclesByPhase[phase] += phaseCycles; #if MEASURE_CLRAPI_CALLS // Record the CLR API timing info as well. m_info.m_CLRinvokesByPhase[phase] += m_CLRcallInvokes; m_info.m_CLRcyclesByPhase[phase] += m_CLRcallCycles; #endif // Credit the phase's ancestors, if any. int ancPhase = PhaseParent[phase]; while (ancPhase != -1) { m_info.m_cyclesByPhase[ancPhase] += phaseCycles; ancPhase = PhaseParent[ancPhase]; } #if MEASURE_CLRAPI_CALLS const Phases lastPhase = PHASE_CLR_API; #else const Phases lastPhase = PHASE_NUMBER_OF; #endif if (phase + 1 == lastPhase) { m_info.m_totalCycles = (threadCurCycles - m_start); } else { m_curPhaseStart = threadCurCycles; } } if ((JitConfig.JitMeasureIR() != 0) && PhaseReportsIRSize[phase]) { m_info.m_nodeCountAfterPhase[phase] = compiler->fgMeasureIR(); } else { m_info.m_nodeCountAfterPhase[phase] = 0; } } #ifdef DEBUG m_lastPhase = phase; #endif #if MEASURE_CLRAPI_CALLS m_CLRcallInvokes = 0; m_CLRcallCycles = 0; #endif } #if MEASURE_CLRAPI_CALLS //------------------------------------------------------------------------ // JitTimer::CLRApiCallEnter: Start the stopwatch for an EE call. // // Arguments: // apix - The API index - an "enum API_ICorJitInfo_Names" value. 
// void JitTimer::CLRApiCallEnter(unsigned apix) { assert(m_CLRcallAPInum == -1); // Nested calls not allowed m_CLRcallAPInum = apix; // If we can't get the cycles, we'll just ignore this call if (!_our_GetThreadCycles(&m_CLRcallStart)) m_CLRcallStart = 0; } //------------------------------------------------------------------------ // JitTimer::CLRApiCallLeave: compute / record time spent in an EE call. // // Arguments: // apix - The API's "enum API_ICorJitInfo_Names" value; this value // should match the value passed to the most recent call to // "CLRApiCallEnter" (i.e. these must come as matched pairs), // and they also may not nest. // void JitTimer::CLRApiCallLeave(unsigned apix) { // Make sure we're actually inside a measured CLR call. assert(m_CLRcallAPInum != -1); m_CLRcallAPInum = -1; // Ignore this one if we don't have a valid starting counter. if (m_CLRcallStart != 0) { if (JitConfig.JitEECallTimingInfo() != 0) { unsigned __int64 threadCurCycles; if (_our_GetThreadCycles(&threadCurCycles)) { // Compute the cycles spent in the call. threadCurCycles -= m_CLRcallStart; // Add the cycles to the 'phase' and bump its use count. m_info.m_cyclesByPhase[PHASE_CLR_API] += threadCurCycles; m_info.m_invokesByPhase[PHASE_CLR_API] += 1; // Add the values to the "per API" info. m_info.m_allClrAPIcycles += threadCurCycles; m_info.m_allClrAPIcalls += 1; m_info.m_perClrAPIcalls[apix] += 1; m_info.m_perClrAPIcycles[apix] += threadCurCycles; m_info.m_maxClrAPIcycles[apix] = max(m_info.m_maxClrAPIcycles[apix], (unsigned __int32)threadCurCycles); // Subtract the cycles from the enclosing phase by bumping its start time m_curPhaseStart += threadCurCycles; // Update the running totals. m_CLRcallInvokes += 1; m_CLRcallCycles += threadCurCycles; } } m_CLRcallStart = 0; } assert(m_CLRcallAPInum != -1); // No longer in this API call. m_CLRcallAPInum = -1; } #endif // MEASURE_CLRAPI_CALLS CritSecObject JitTimer::s_csvLock; // It's expensive to constantly open and close the file, so open it once and close it // when the process exits. This should be accessed under the s_csvLock. FILE* JitTimer::s_csvFile = nullptr; LPCWSTR Compiler::JitTimeLogCsv() { LPCWSTR jitTimeLogCsv = JitConfig.JitTimeLogCsv(); return jitTimeLogCsv; } void JitTimer::PrintCsvHeader() { LPCWSTR jitTimeLogCsv = Compiler::JitTimeLogCsv(); if (jitTimeLogCsv == nullptr) { return; } CritSecHolder csvLock(s_csvLock); if (s_csvFile == nullptr) { s_csvFile = _wfopen(jitTimeLogCsv, W("a")); } if (s_csvFile != nullptr) { // Seek to the end of the file s.t. 
`ftell` doesn't lie to us on Windows fseek(s_csvFile, 0, SEEK_END); // Write the header if the file is empty if (ftell(s_csvFile) == 0) { fprintf(s_csvFile, "\"Method Name\","); fprintf(s_csvFile, "\"Assembly or SPMI Index\","); fprintf(s_csvFile, "\"IL Bytes\","); fprintf(s_csvFile, "\"Basic Blocks\","); fprintf(s_csvFile, "\"Min Opts\","); fprintf(s_csvFile, "\"Loops\","); fprintf(s_csvFile, "\"Loops Cloned\","); #if FEATURE_LOOP_ALIGN #ifdef DEBUG fprintf(s_csvFile, "\"Alignment Candidates\","); fprintf(s_csvFile, "\"Loops Aligned\","); #endif // DEBUG #endif // FEATURE_LOOP_ALIGN for (int i = 0; i < PHASE_NUMBER_OF; i++) { fprintf(s_csvFile, "\"%s\",", PhaseNames[i]); if ((JitConfig.JitMeasureIR() != 0) && PhaseReportsIRSize[i]) { fprintf(s_csvFile, "\"Node Count After %s\",", PhaseNames[i]); } } InlineStrategy::DumpCsvHeader(s_csvFile); fprintf(s_csvFile, "\"Executable Code Bytes\","); fprintf(s_csvFile, "\"GC Info Bytes\","); fprintf(s_csvFile, "\"Total Bytes Allocated\","); fprintf(s_csvFile, "\"Total Cycles\","); fprintf(s_csvFile, "\"CPS\"\n"); fflush(s_csvFile); } } } void JitTimer::PrintCsvMethodStats(Compiler* comp) { LPCWSTR jitTimeLogCsv = Compiler::JitTimeLogCsv(); if (jitTimeLogCsv == nullptr) { return; } // eeGetMethodFullName uses locks, so don't enter crit sec before this call. #if defined(DEBUG) || defined(LATE_DISASM) // If we already have computed the name because for some reason we're generating the CSV // for a DEBUG build (presumably not for the time info), just re-use it. const char* methName = comp->info.compFullName; #else const char* methName = comp->eeGetMethodFullName(comp->info.compMethodHnd); #endif // Try and access the SPMI index to report in the data set. // // If the jit is not hosted under SPMI this will return the // default value of zero. // // Query the jit host directly here instead of going via the // config cache, since value will change for each method. 
int index = g_jitHost->getIntConfigValue(W("SuperPMIMethodContextNumber"), -1); CritSecHolder csvLock(s_csvLock); if (s_csvFile == nullptr) { return; } fprintf(s_csvFile, "\"%s\",", methName); if (index != 0) { fprintf(s_csvFile, "%d,", index); } else { const char* methodAssemblyName = comp->info.compCompHnd->getAssemblyName( comp->info.compCompHnd->getModuleAssembly(comp->info.compCompHnd->getClassModule(comp->info.compClassHnd))); fprintf(s_csvFile, "\"%s\",", methodAssemblyName); } fprintf(s_csvFile, "%u,", comp->info.compILCodeSize); fprintf(s_csvFile, "%u,", comp->fgBBcount); fprintf(s_csvFile, "%u,", comp->opts.MinOpts()); fprintf(s_csvFile, "%u,", comp->optLoopCount); fprintf(s_csvFile, "%u,", comp->optLoopsCloned); #if FEATURE_LOOP_ALIGN #ifdef DEBUG fprintf(s_csvFile, "%u,", comp->loopAlignCandidates); fprintf(s_csvFile, "%u,", comp->loopsAligned); #endif // DEBUG #endif // FEATURE_LOOP_ALIGN unsigned __int64 totCycles = 0; for (int i = 0; i < PHASE_NUMBER_OF; i++) { if (!PhaseHasChildren[i]) { totCycles += m_info.m_cyclesByPhase[i]; } fprintf(s_csvFile, "%I64u,", m_info.m_cyclesByPhase[i]); if ((JitConfig.JitMeasureIR() != 0) && PhaseReportsIRSize[i]) { fprintf(s_csvFile, "%u,", m_info.m_nodeCountAfterPhase[i]); } } comp->m_inlineStrategy->DumpCsvData(s_csvFile); fprintf(s_csvFile, "%u,", comp->info.compNativeCodeSize); fprintf(s_csvFile, "%Iu,", comp->compInfoBlkSize); fprintf(s_csvFile, "%Iu,", comp->compGetArenaAllocator()->getTotalBytesAllocated()); fprintf(s_csvFile, "%I64u,", m_info.m_totalCycles); fprintf(s_csvFile, "%f\n", CachedCyclesPerSecond()); fflush(s_csvFile); } // Perform process shutdown actions. // // static void JitTimer::Shutdown() { CritSecHolder csvLock(s_csvLock); if (s_csvFile != nullptr) { fclose(s_csvFile); } } // Completes the timing of the current method, and adds it to "sum". void JitTimer::Terminate(Compiler* comp, CompTimeSummaryInfo& sum, bool includePhases) { if (includePhases) { PrintCsvMethodStats(comp); } sum.AddInfo(m_info, includePhases); } #endif // FEATURE_JIT_METHOD_PERF #if LOOP_HOIST_STATS // Static fields. CritSecObject Compiler::s_loopHoistStatsLock; // Default constructor. unsigned Compiler::s_loopsConsidered = 0; unsigned Compiler::s_loopsWithHoistedExpressions = 0; unsigned Compiler::s_totalHoistedExpressions = 0; // static void Compiler::PrintAggregateLoopHoistStats(FILE* f) { fprintf(f, "\n"); fprintf(f, "---------------------------------------------------\n"); fprintf(f, "Loop hoisting stats\n"); fprintf(f, "---------------------------------------------------\n"); double pctWithHoisted = 0.0; if (s_loopsConsidered > 0) { pctWithHoisted = 100.0 * (double(s_loopsWithHoistedExpressions) / double(s_loopsConsidered)); } double exprsPerLoopWithExpr = 0.0; if (s_loopsWithHoistedExpressions > 0) { exprsPerLoopWithExpr = double(s_totalHoistedExpressions) / double(s_loopsWithHoistedExpressions); } fprintf(f, "Considered %d loops. 
Of these, we hoisted expressions out of %d (%6.2f%%).\n", s_loopsConsidered, s_loopsWithHoistedExpressions, pctWithHoisted); fprintf(f, " A total of %d expressions were hoisted, an average of %5.2f per loop-with-hoisted-expr.\n", s_totalHoistedExpressions, exprsPerLoopWithExpr); } void Compiler::AddLoopHoistStats() { CritSecHolder statsLock(s_loopHoistStatsLock); s_loopsConsidered += m_loopsConsidered; s_loopsWithHoistedExpressions += m_loopsWithHoistedExpressions; s_totalHoistedExpressions += m_totalHoistedExpressions; } void Compiler::PrintPerMethodLoopHoistStats() { double pctWithHoisted = 0.0; if (m_loopsConsidered > 0) { pctWithHoisted = 100.0 * (double(m_loopsWithHoistedExpressions) / double(m_loopsConsidered)); } double exprsPerLoopWithExpr = 0.0; if (m_loopsWithHoistedExpressions > 0) { exprsPerLoopWithExpr = double(m_totalHoistedExpressions) / double(m_loopsWithHoistedExpressions); } printf("Considered %d loops. Of these, we hoisted expressions out of %d (%5.2f%%).\n", m_loopsConsidered, m_loopsWithHoistedExpressions, pctWithHoisted); printf(" A total of %d expressions were hoisted, an average of %5.2f per loop-with-hoisted-expr.\n", m_totalHoistedExpressions, exprsPerLoopWithExpr); } #endif // LOOP_HOIST_STATS //------------------------------------------------------------------------ // RecordStateAtEndOfInlining: capture timing data (if enabled) after // inlining as completed. // // Note: // Records data needed for SQM and inlining data dumps. Should be // called after inlining is complete. (We do this after inlining // because this marks the last point at which the JIT is likely to // cause type-loading and class initialization). void Compiler::RecordStateAtEndOfInlining() { #if defined(DEBUG) || defined(INLINE_DATA) m_compCyclesAtEndOfInlining = 0; m_compTickCountAtEndOfInlining = 0; bool b = CycleTimer::GetThreadCyclesS(&m_compCyclesAtEndOfInlining); if (!b) { return; // We don't have a thread cycle counter. } m_compTickCountAtEndOfInlining = GetTickCount(); #endif // defined(DEBUG) || defined(INLINE_DATA) } //------------------------------------------------------------------------ // RecordStateAtEndOfCompilation: capture timing data (if enabled) after // compilation is completed. void Compiler::RecordStateAtEndOfCompilation() { #if defined(DEBUG) || defined(INLINE_DATA) // Common portion m_compCycles = 0; unsigned __int64 compCyclesAtEnd; bool b = CycleTimer::GetThreadCyclesS(&compCyclesAtEnd); if (!b) { return; // We don't have a thread cycle counter. } assert(compCyclesAtEnd >= m_compCyclesAtEndOfInlining); m_compCycles = compCyclesAtEnd - m_compCyclesAtEndOfInlining; #endif // defined(DEBUG) || defined(INLINE_DATA) } #if FUNC_INFO_LOGGING // static LPCWSTR Compiler::compJitFuncInfoFilename = nullptr; // static FILE* Compiler::compJitFuncInfoFile = nullptr; #endif // FUNC_INFO_LOGGING #ifdef DEBUG // dumpConvertedVarSet() dumps the varset bits that are tracked // variable indices, and we convert them to variable numbers, sort the variable numbers, and // print them as variable numbers. To do this, we use a temporary set indexed by // variable number. We can't use the "all varset" type because it is still size-limited, and might // not be big enough to handle all possible variable numbers. void dumpConvertedVarSet(Compiler* comp, VARSET_VALARG_TP vars) { BYTE* pVarNumSet; // trivial set: one byte per varNum, 0 means not in set, 1 means in set. 
size_t varNumSetBytes = comp->lvaCount * sizeof(BYTE); pVarNumSet = (BYTE*)_alloca(varNumSetBytes); memset(pVarNumSet, 0, varNumSetBytes); // empty the set VarSetOps::Iter iter(comp, vars); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { unsigned varNum = comp->lvaTrackedIndexToLclNum(varIndex); pVarNumSet[varNum] = 1; // This varNum is in the set } bool first = true; printf("{"); for (size_t varNum = 0; varNum < comp->lvaCount; varNum++) { if (pVarNumSet[varNum] == 1) { if (!first) { printf(" "); } printf("V%02u", varNum); first = false; } } printf("}"); } /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Debugging helpers XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ /* The following functions are intended to be called from the debugger, to dump * various data structures. * * The versions that start with 'c' take a Compiler* as the first argument. * The versions that start with 'd' use the tlsCompiler, so don't require a Compiler*. * * Summary: * cBlock, dBlock : Display a basic block (call fgTableDispBasicBlock()). * cBlocks, dBlocks : Display all the basic blocks of a function (call fgDispBasicBlocks()). * cBlocksV, dBlocksV : Display all the basic blocks of a function (call fgDispBasicBlocks(true)). * "V" means "verbose", and will dump all the trees. * cStmt, dStmt : Display a Statement (call gtDispStmt()). * cTree, dTree : Display a tree (call gtDispTree()). * cTreeLIR, dTreeLIR : Display a tree in LIR form (call gtDispLIRNode()). * cTrees, dTrees : Display all the trees in a function (call fgDumpTrees()). * cEH, dEH : Display the EH handler table (call fgDispHandlerTab()). * cVar, dVar : Display a local variable given its number (call lvaDumpEntry()). * cVarDsc, dVarDsc : Display a local variable given a LclVarDsc* (call lvaDumpEntry()). * cVars, dVars : Display the local variable table (call lvaTableDump()). * cVarsFinal, dVarsFinal : Display the local variable table (call lvaTableDump(FINAL_FRAME_LAYOUT)). * cBlockCheapPreds, dBlockCheapPreds : Display a block's cheap predecessors (call block->dspCheapPreds()). * cBlockPreds, dBlockPreds : Display a block's predecessors (call block->dspPreds()). * cBlockSuccs, dBlockSuccs : Display a block's successors (call block->dspSuccs(compiler)). * cReach, dReach : Display all block reachability (call fgDispReach()). * cDoms, dDoms : Display all block dominators (call fgDispDoms()). * cLiveness, dLiveness : Display per-block variable liveness (call fgDispBBLiveness()). * cCVarSet, dCVarSet : Display a "converted" VARSET_TP: the varset is assumed to be tracked variable * indices. These are converted to variable numbers and sorted. (Calls * dumpConvertedVarSet()). * cLoop, dLoop : Display the blocks of a loop, including the trees. * cTreeFlags, dTreeFlags : Display tree flags * * The following don't require a Compiler* to work: * dRegMask : Display a regMaskTP (call dspRegMask(mask)). * dBlockList : Display a BasicBlockList*. 
*/ void cBlock(Compiler* comp, BasicBlock* block) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Block %u\n", sequenceNumber++); comp->fgTableDispBasicBlock(block); } void cBlocks(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Blocks %u\n", sequenceNumber++); comp->fgDispBasicBlocks(); } void cBlocksV(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *BlocksV %u\n", sequenceNumber++); comp->fgDispBasicBlocks(true); } void cStmt(Compiler* comp, Statement* statement) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Stmt %u\n", sequenceNumber++); comp->gtDispStmt(statement, ">>>"); } void cTree(Compiler* comp, GenTree* tree) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Tree %u\n", sequenceNumber++); comp->gtDispTree(tree, nullptr, ">>>"); } void cTreeLIR(Compiler* comp, GenTree* tree) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *TreeLIR %u\n", sequenceNumber++); comp->gtDispLIRNode(tree); } void cTrees(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Trees %u\n", sequenceNumber++); comp->fgDumpTrees(comp->fgFirstBB, nullptr); } void cEH(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *EH %u\n", sequenceNumber++); comp->fgDispHandlerTab(); } void cVar(Compiler* comp, unsigned lclNum) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Var %u\n", sequenceNumber++); comp->lvaDumpEntry(lclNum, Compiler::FINAL_FRAME_LAYOUT); } void cVarDsc(Compiler* comp, LclVarDsc* varDsc) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *VarDsc %u\n", sequenceNumber++); unsigned lclNum = comp->lvaGetLclNum(varDsc); comp->lvaDumpEntry(lclNum, Compiler::FINAL_FRAME_LAYOUT); } void cVars(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Vars %u\n", sequenceNumber++); comp->lvaTableDump(); } void cVarsFinal(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called 
printf("===================================================================== *Vars %u\n", sequenceNumber++); comp->lvaTableDump(Compiler::FINAL_FRAME_LAYOUT); } void cBlockCheapPreds(Compiler* comp, BasicBlock* block) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *BlockCheapPreds %u\n", sequenceNumber++); block->dspCheapPreds(); } void cBlockPreds(Compiler* comp, BasicBlock* block) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *BlockPreds %u\n", sequenceNumber++); block->dspPreds(); } void cBlockSuccs(Compiler* comp, BasicBlock* block) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *BlockSuccs %u\n", sequenceNumber++); block->dspSuccs(comp); } void cReach(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Reach %u\n", sequenceNumber++); comp->fgDispReach(); } void cDoms(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Doms %u\n", sequenceNumber++); comp->fgDispDoms(); } void cLiveness(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Liveness %u\n", sequenceNumber++); comp->fgDispBBLiveness(); } void cCVarSet(Compiler* comp, VARSET_VALARG_TP vars) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *CVarSet %u\n", sequenceNumber++); dumpConvertedVarSet(comp, vars); printf("\n"); // dumpConvertedVarSet() doesn't emit a trailing newline } void cLoop(Compiler* comp, unsigned loopNum) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Loop %u\n", sequenceNumber++); comp->optPrintLoopInfo(loopNum, /* verbose */ true); printf("\n"); } void cLoopPtr(Compiler* comp, const Compiler::LoopDsc* loop) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *LoopPtr %u\n", sequenceNumber++); comp->optPrintLoopInfo(loop, /* verbose */ true); printf("\n"); } void cLoops(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Loops %u\n", sequenceNumber++); comp->optPrintLoopTable(); } void dBlock(BasicBlock* block) { cBlock(JitTls::GetCompiler(), block); } void dBlocks() { cBlocks(JitTls::GetCompiler()); } void dBlocksV() { cBlocksV(JitTls::GetCompiler()); } void dStmt(Statement* statement) { cStmt(JitTls::GetCompiler(), statement); } void dTree(GenTree* tree) { 
cTree(JitTls::GetCompiler(), tree); } void dTreeLIR(GenTree* tree) { cTreeLIR(JitTls::GetCompiler(), tree); } void dTreeRange(GenTree* first, GenTree* last) { Compiler* comp = JitTls::GetCompiler(); GenTree* cur = first; while (true) { cTreeLIR(comp, cur); if (cur == last) break; cur = cur->gtNext; } } void dTrees() { cTrees(JitTls::GetCompiler()); } void dEH() { cEH(JitTls::GetCompiler()); } void dVar(unsigned lclNum) { cVar(JitTls::GetCompiler(), lclNum); } void dVarDsc(LclVarDsc* varDsc) { cVarDsc(JitTls::GetCompiler(), varDsc); } void dVars() { cVars(JitTls::GetCompiler()); } void dVarsFinal() { cVarsFinal(JitTls::GetCompiler()); } void dBlockPreds(BasicBlock* block) { cBlockPreds(JitTls::GetCompiler(), block); } void dBlockCheapPreds(BasicBlock* block) { cBlockCheapPreds(JitTls::GetCompiler(), block); } void dBlockSuccs(BasicBlock* block) { cBlockSuccs(JitTls::GetCompiler(), block); } void dReach() { cReach(JitTls::GetCompiler()); } void dDoms() { cDoms(JitTls::GetCompiler()); } void dLiveness() { cLiveness(JitTls::GetCompiler()); } void dCVarSet(VARSET_VALARG_TP vars) { cCVarSet(JitTls::GetCompiler(), vars); } void dLoop(unsigned loopNum) { cLoop(JitTls::GetCompiler(), loopNum); } void dLoopPtr(const Compiler::LoopDsc* loop) { cLoopPtr(JitTls::GetCompiler(), loop); } void dLoops() { cLoops(JitTls::GetCompiler()); } void dRegMask(regMaskTP mask) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== dRegMask %u\n", sequenceNumber++); dspRegMask(mask); printf("\n"); // dspRegMask() doesn't emit a trailing newline } void dBlockList(BasicBlockList* list) { printf("WorkList: "); while (list != nullptr) { printf(FMT_BB " ", list->block->bbNum); list = list->next; } printf("\n"); } // Global variables available in debug mode. That are set by debug APIs for finding // Trees, Stmts, and/or Blocks using id or bbNum. // That can be used in watch window or as a way to get address of fields for data break points. GenTree* dbTree; Statement* dbStmt; BasicBlock* dbTreeBlock; BasicBlock* dbBlock; // Debug APIs for finding Trees, Stmts, and/or Blocks. // As a side effect, they set the debug variables above. GenTree* dFindTree(GenTree* tree, unsigned id) { if (tree == nullptr) { return nullptr; } if (tree->gtTreeID == id) { dbTree = tree; return tree; } GenTree* child = nullptr; tree->VisitOperands([&child, id](GenTree* operand) -> GenTree::VisitResult { child = dFindTree(child, id); return (child != nullptr) ? 
GenTree::VisitResult::Abort : GenTree::VisitResult::Continue; }); return child; } GenTree* dFindTree(unsigned id) { Compiler* comp = JitTls::GetCompiler(); GenTree* tree; dbTreeBlock = nullptr; dbTree = nullptr; for (BasicBlock* const block : comp->Blocks()) { for (Statement* const stmt : block->Statements()) { tree = dFindTree(stmt->GetRootNode(), id); if (tree != nullptr) { dbTreeBlock = block; return tree; } } } return nullptr; } Statement* dFindStmt(unsigned id) { Compiler* comp = JitTls::GetCompiler(); dbStmt = nullptr; unsigned stmtId = 0; for (BasicBlock* const block : comp->Blocks()) { for (Statement* const stmt : block->Statements()) { stmtId++; if (stmtId == id) { dbStmt = stmt; return stmt; } } } return nullptr; } BasicBlock* dFindBlock(unsigned bbNum) { Compiler* comp = JitTls::GetCompiler(); BasicBlock* block = nullptr; dbBlock = nullptr; for (block = comp->fgFirstBB; block != nullptr; block = block->bbNext) { if (block->bbNum == bbNum) { dbBlock = block; break; } } return block; } Compiler::LoopDsc* dFindLoop(unsigned loopNum) { Compiler* comp = JitTls::GetCompiler(); if (loopNum >= comp->optLoopCount) { printf("loopNum %u out of range\n"); return nullptr; } return &comp->optLoopTable[loopNum]; } void cTreeFlags(Compiler* comp, GenTree* tree) { int chars = 0; if (tree->gtFlags != 0) { chars += printf("flags="); // Node flags CLANG_FORMAT_COMMENT_ANCHOR; #if defined(DEBUG) if (tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE) { chars += printf("[NODE_LARGE]"); } if (tree->gtDebugFlags & GTF_DEBUG_NODE_SMALL) { chars += printf("[NODE_SMALL]"); } if (tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) { chars += printf("[MORPHED]"); } #endif // defined(DEBUG) if (tree->gtFlags & GTF_COLON_COND) { chars += printf("[COLON_COND]"); } // Operator flags genTreeOps op = tree->OperGet(); switch (op) { case GT_LCL_VAR: case GT_LCL_VAR_ADDR: case GT_LCL_FLD: case GT_LCL_FLD_ADDR: case GT_STORE_LCL_FLD: case GT_STORE_LCL_VAR: if (tree->gtFlags & GTF_VAR_DEF) { chars += printf("[VAR_DEF]"); } if (tree->gtFlags & GTF_VAR_USEASG) { chars += printf("[VAR_USEASG]"); } if (tree->gtFlags & GTF_VAR_CAST) { chars += printf("[VAR_CAST]"); } if (tree->gtFlags & GTF_VAR_ITERATOR) { chars += printf("[VAR_ITERATOR]"); } if (tree->gtFlags & GTF_VAR_CLONED) { chars += printf("[VAR_CLONED]"); } if (tree->gtFlags & GTF_VAR_DEATH) { chars += printf("[VAR_DEATH]"); } if (tree->gtFlags & GTF_VAR_ARR_INDEX) { chars += printf("[VAR_ARR_INDEX]"); } #if defined(DEBUG) if (tree->gtDebugFlags & GTF_DEBUG_VAR_CSE_REF) { chars += printf("[VAR_CSE_REF]"); } #endif break; case GT_NO_OP: break; case GT_FIELD: if (tree->gtFlags & GTF_FLD_VOLATILE) { chars += printf("[FLD_VOLATILE]"); } break; case GT_INDEX: if (tree->gtFlags & GTF_INX_STRING_LAYOUT) { chars += printf("[INX_STRING_LAYOUT]"); } FALLTHROUGH; case GT_INDEX_ADDR: if (tree->gtFlags & GTF_INX_RNGCHK) { chars += printf("[INX_RNGCHK]"); } break; case GT_IND: case GT_STOREIND: if (tree->gtFlags & GTF_IND_VOLATILE) { chars += printf("[IND_VOLATILE]"); } if (tree->gtFlags & GTF_IND_TGTANYWHERE) { chars += printf("[IND_TGTANYWHERE]"); } if (tree->gtFlags & GTF_IND_TGT_NOT_HEAP) { chars += printf("[IND_TGT_NOT_HEAP]"); } if (tree->gtFlags & GTF_IND_TLS_REF) { chars += printf("[IND_TLS_REF]"); } if (tree->gtFlags & GTF_IND_ASG_LHS) { chars += printf("[IND_ASG_LHS]"); } if (tree->gtFlags & GTF_IND_UNALIGNED) { chars += printf("[IND_UNALIGNED]"); } if (tree->gtFlags & GTF_IND_INVARIANT) { chars += printf("[IND_INVARIANT]"); } if (tree->gtFlags & GTF_IND_NONNULL) { chars += 
printf("[IND_NONNULL]"); } break; case GT_CLS_VAR: if (tree->gtFlags & GTF_CLS_VAR_ASG_LHS) { chars += printf("[CLS_VAR_ASG_LHS]"); } break; case GT_MUL: #if !defined(TARGET_64BIT) case GT_MUL_LONG: #endif if (tree->gtFlags & GTF_MUL_64RSLT) { chars += printf("[64RSLT]"); } if (tree->gtFlags & GTF_ADDRMODE_NO_CSE) { chars += printf("[ADDRMODE_NO_CSE]"); } break; case GT_ADD: if (tree->gtFlags & GTF_ADDRMODE_NO_CSE) { chars += printf("[ADDRMODE_NO_CSE]"); } break; case GT_LSH: if (tree->gtFlags & GTF_ADDRMODE_NO_CSE) { chars += printf("[ADDRMODE_NO_CSE]"); } break; case GT_MOD: case GT_UMOD: break; case GT_EQ: case GT_NE: case GT_LT: case GT_LE: case GT_GT: case GT_GE: if (tree->gtFlags & GTF_RELOP_NAN_UN) { chars += printf("[RELOP_NAN_UN]"); } if (tree->gtFlags & GTF_RELOP_JMP_USED) { chars += printf("[RELOP_JMP_USED]"); } break; case GT_QMARK: if (tree->gtFlags & GTF_QMARK_CAST_INSTOF) { chars += printf("[QMARK_CAST_INSTOF]"); } break; case GT_BOX: if (tree->gtFlags & GTF_BOX_VALUE) { chars += printf("[BOX_VALUE]"); } break; case GT_CNS_INT: { GenTreeFlags handleKind = (tree->gtFlags & GTF_ICON_HDL_MASK); switch (handleKind) { case GTF_ICON_SCOPE_HDL: chars += printf("[ICON_SCOPE_HDL]"); break; case GTF_ICON_CLASS_HDL: chars += printf("[ICON_CLASS_HDL]"); break; case GTF_ICON_METHOD_HDL: chars += printf("[ICON_METHOD_HDL]"); break; case GTF_ICON_FIELD_HDL: chars += printf("[ICON_FIELD_HDL]"); break; case GTF_ICON_STATIC_HDL: chars += printf("[ICON_STATIC_HDL]"); break; case GTF_ICON_STR_HDL: chars += printf("[ICON_STR_HDL]"); break; case GTF_ICON_CONST_PTR: chars += printf("[ICON_CONST_PTR]"); break; case GTF_ICON_GLOBAL_PTR: chars += printf("[ICON_GLOBAL_PTR]"); break; case GTF_ICON_VARG_HDL: chars += printf("[ICON_VARG_HDL]"); break; case GTF_ICON_PINVKI_HDL: chars += printf("[ICON_PINVKI_HDL]"); break; case GTF_ICON_TOKEN_HDL: chars += printf("[ICON_TOKEN_HDL]"); break; case GTF_ICON_TLS_HDL: chars += printf("[ICON_TLD_HDL]"); break; case GTF_ICON_FTN_ADDR: chars += printf("[ICON_FTN_ADDR]"); break; case GTF_ICON_CIDMID_HDL: chars += printf("[ICON_CIDMID_HDL]"); break; case GTF_ICON_BBC_PTR: chars += printf("[ICON_BBC_PTR]"); break; case GTF_ICON_STATIC_BOX_PTR: chars += printf("[GTF_ICON_STATIC_BOX_PTR]"); break; case GTF_ICON_FIELD_OFF: chars += printf("[ICON_FIELD_OFF]"); break; default: assert(!"a forgotten handle flag"); break; } } break; case GT_OBJ: case GT_STORE_OBJ: if (tree->AsObj()->GetLayout()->HasGCPtr()) { chars += printf("[BLK_HASGCPTR]"); } FALLTHROUGH; case GT_BLK: case GT_STORE_BLK: case GT_STORE_DYN_BLK: if (tree->gtFlags & GTF_BLK_VOLATILE) { chars += printf("[BLK_VOLATILE]"); } if (tree->AsBlk()->IsUnaligned()) { chars += printf("[BLK_UNALIGNED]"); } break; case GT_CALL: if (tree->gtFlags & GTF_CALL_UNMANAGED) { chars += printf("[CALL_UNMANAGED]"); } if (tree->gtFlags & GTF_CALL_INLINE_CANDIDATE) { chars += printf("[CALL_INLINE_CANDIDATE]"); } if (!tree->AsCall()->IsVirtual()) { chars += printf("[CALL_NONVIRT]"); } if (tree->AsCall()->IsVirtualVtable()) { chars += printf("[CALL_VIRT_VTABLE]"); } if (tree->AsCall()->IsVirtualStub()) { chars += printf("[CALL_VIRT_STUB]"); } if (tree->gtFlags & GTF_CALL_NULLCHECK) { chars += printf("[CALL_NULLCHECK]"); } if (tree->gtFlags & GTF_CALL_POP_ARGS) { chars += printf("[CALL_POP_ARGS]"); } if (tree->gtFlags & GTF_CALL_HOISTABLE) { chars += printf("[CALL_HOISTABLE]"); } // More flags associated with calls. 
{ GenTreeCall* call = tree->AsCall(); if (call->gtCallMoreFlags & GTF_CALL_M_EXPLICIT_TAILCALL) { chars += printf("[CALL_M_EXPLICIT_TAILCALL]"); } if (call->gtCallMoreFlags & GTF_CALL_M_TAILCALL) { chars += printf("[CALL_M_TAILCALL]"); } if (call->gtCallMoreFlags & GTF_CALL_M_VARARGS) { chars += printf("[CALL_M_VARARGS]"); } if (call->gtCallMoreFlags & GTF_CALL_M_RETBUFFARG) { chars += printf("[CALL_M_RETBUFFARG]"); } if (call->gtCallMoreFlags & GTF_CALL_M_DELEGATE_INV) { chars += printf("[CALL_M_DELEGATE_INV]"); } if (call->gtCallMoreFlags & GTF_CALL_M_NOGCCHECK) { chars += printf("[CALL_M_NOGCCHECK]"); } if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) { chars += printf("[CALL_M_SPECIAL_INTRINSIC]"); } if (call->IsUnmanaged()) { if (call->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL) { chars += printf("[CALL_M_UNMGD_THISCALL]"); } } else if (call->IsVirtualStub()) { if (call->gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT) { chars += printf("[CALL_M_VIRTSTUB_REL_INDIRECT]"); } } else if (!call->IsVirtual()) { if (call->gtCallMoreFlags & GTF_CALL_M_NONVIRT_SAME_THIS) { chars += printf("[CALL_M_NONVIRT_SAME_THIS]"); } } if (call->gtCallMoreFlags & GTF_CALL_M_FRAME_VAR_DEATH) { chars += printf("[CALL_M_FRAME_VAR_DEATH]"); } if (call->gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_JIT_HELPER) { chars += printf("[CALL_M_TAILCALL_VIA_JIT_HELPER]"); } #if FEATURE_TAILCALL_OPT if (call->gtCallMoreFlags & GTF_CALL_M_IMPLICIT_TAILCALL) { chars += printf("[CALL_M_IMPLICIT_TAILCALL]"); } #endif if (call->gtCallMoreFlags & GTF_CALL_M_PINVOKE) { chars += printf("[CALL_M_PINVOKE]"); } if (call->IsFatPointerCandidate()) { chars += printf("[CALL_FAT_POINTER_CANDIDATE]"); } if (call->IsGuarded()) { chars += printf("[CALL_GUARDED]"); } if (call->IsExpRuntimeLookup()) { chars += printf("[CALL_EXP_RUNTIME_LOOKUP]"); } } break; default: { GenTreeFlags flags = (tree->gtFlags & (~(GTF_COMMON_MASK | GTF_OVERFLOW))); if (flags != 0) { chars += printf("[%08X]", flags); } } break; } // Common flags. 
if (tree->gtFlags & GTF_ASG) { chars += printf("[ASG]"); } if (tree->gtFlags & GTF_CALL) { chars += printf("[CALL]"); } switch (op) { case GT_MUL: case GT_CAST: case GT_ADD: case GT_SUB: if (tree->gtFlags & GTF_OVERFLOW) { chars += printf("[OVERFLOW]"); } break; default: break; } if (tree->gtFlags & GTF_EXCEPT) { chars += printf("[EXCEPT]"); } if (tree->gtFlags & GTF_GLOB_REF) { chars += printf("[GLOB_REF]"); } if (tree->gtFlags & GTF_ORDER_SIDEEFF) { chars += printf("[ORDER_SIDEEFF]"); } if (tree->gtFlags & GTF_REVERSE_OPS) { if (op != GT_LCL_VAR) { chars += printf("[REVERSE_OPS]"); } } if (tree->gtFlags & GTF_SPILLED) { chars += printf("[SPILLED_OPER]"); } #if FEATURE_SET_FLAGS if (tree->gtFlags & GTF_SET_FLAGS) { if ((op != GT_IND) && (op != GT_STOREIND)) { chars += printf("[ZSF_SET_FLAGS]"); } } #endif if (tree->gtFlags & GTF_IND_NONFAULTING) { if (tree->OperIsIndirOrArrLength()) { chars += printf("[IND_NONFAULTING]"); } } if (tree->gtFlags & GTF_MAKE_CSE) { chars += printf("[MAKE_CSE]"); } if (tree->gtFlags & GTF_DONT_CSE) { chars += printf("[DONT_CSE]"); } if (tree->gtFlags & GTF_BOOLEAN) { chars += printf("[BOOLEAN]"); } if (tree->gtFlags & GTF_UNSIGNED) { chars += printf("[SMALL_UNSIGNED]"); } if (tree->gtFlags & GTF_LATE_ARG) { chars += printf("[SMALL_LATE_ARG]"); } if (tree->gtFlags & GTF_SPILL) { chars += printf("[SPILL]"); } if (tree->gtFlags & GTF_REUSE_REG_VAL) { if (op == GT_CNS_INT) { chars += printf("[REUSE_REG_VAL]"); } } } } void dTreeFlags(GenTree* tree) { cTreeFlags(JitTls::GetCompiler(), tree); } #endif // DEBUG #if VARSET_COUNTOPS // static BitSetSupport::BitSetOpCounter Compiler::m_varsetOpCounter("VarSetOpCounts.log"); #endif #if ALLVARSET_COUNTOPS // static BitSetSupport::BitSetOpCounter Compiler::m_allvarsetOpCounter("AllVarSetOpCounts.log"); #endif // static HelperCallProperties Compiler::s_helperCallProperties; /*****************************************************************************/ /*****************************************************************************/ //------------------------------------------------------------------------ // killGCRefs: // Given some tree node return does it need all GC refs to be spilled from // callee save registers. // // Arguments: // tree - the tree for which we ask about gc refs. // // Return Value: // true - tree kills GC refs on callee save registers // false - tree doesn't affect GC refs on callee save registers bool Compiler::killGCRefs(GenTree* tree) { if (tree->IsCall()) { GenTreeCall* call = tree->AsCall(); if (call->IsUnmanaged()) { return true; } if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_JIT_PINVOKE_BEGIN)) { assert(opts.ShouldUsePInvokeHelpers()); return true; } } else if (tree->OperIs(GT_START_PREEMPTGC)) { return true; } return false; } //------------------------------------------------------------------------ // lvaIsOSRLocal: check if this local var is one that requires special // treatment for OSR compilations. 
// // Arguments: // varNum - variable of interest // // Return Value: // true - this is an OSR compile and this local requires special treatment // false - not an OSR compile, or not an interesting local for OSR bool Compiler::lvaIsOSRLocal(unsigned varNum) { if (!opts.IsOSR()) { return false; } if (varNum < info.compLocalsCount) { return true; } LclVarDsc* varDsc = lvaGetDesc(varNum); if (varDsc->lvIsStructField) { return (varDsc->lvParentLcl < info.compLocalsCount); } return false; } //------------------------------------------------------------------------------ // gtTypeForNullCheck: helper to get the most optimal and correct type for nullcheck // // Arguments: // tree - the node for nullcheck; // var_types Compiler::gtTypeForNullCheck(GenTree* tree) { if (varTypeIsArithmetic(tree)) { #if defined(TARGET_XARCH) // Just an optimization for XARCH - smaller mov if (varTypeIsLong(tree)) { return TYP_INT; } #endif return tree->TypeGet(); } // for the rest: probe a single byte to avoid potential AVEs return TYP_BYTE; } //------------------------------------------------------------------------------ // gtChangeOperToNullCheck: helper to change tree oper to a NULLCHECK. // // Arguments: // tree - the node to change; // basicBlock - basic block of the node. // // Notes: // the function should not be called after lowering for platforms that do not support // emitting NULLCHECK nodes, like arm32. Use `Lowering::TransformUnusedIndirection` // that handles it and calls this function when appropriate. // void Compiler::gtChangeOperToNullCheck(GenTree* tree, BasicBlock* block) { assert(tree->OperIs(GT_FIELD, GT_IND, GT_OBJ, GT_BLK)); tree->ChangeOper(GT_NULLCHECK); tree->ChangeType(gtTypeForNullCheck(tree)); block->bbFlags |= BBF_HAS_NULLCHECK; optMethodFlags |= OMF_HAS_NULLCHECK; } #if defined(DEBUG) //------------------------------------------------------------------------------ // devirtualizationDetailToString: describe the detailed devirtualization reason // // Arguments: // detail - detail to describe // // Returns: // descriptive string // const char* Compiler::devirtualizationDetailToString(CORINFO_DEVIRTUALIZATION_DETAIL detail) { switch (detail) { case CORINFO_DEVIRTUALIZATION_UNKNOWN: return "unknown"; case CORINFO_DEVIRTUALIZATION_SUCCESS: return "success"; case CORINFO_DEVIRTUALIZATION_FAILED_CANON: return "object class was canonical"; case CORINFO_DEVIRTUALIZATION_FAILED_COM: return "object class was com"; case CORINFO_DEVIRTUALIZATION_FAILED_CAST: return "object class could not be cast to interface class"; case CORINFO_DEVIRTUALIZATION_FAILED_LOOKUP: return "interface method could not be found"; case CORINFO_DEVIRTUALIZATION_FAILED_DIM: return "interface method was default interface method"; case CORINFO_DEVIRTUALIZATION_FAILED_SUBCLASS: return "object not subclass of base class"; case CORINFO_DEVIRTUALIZATION_FAILED_SLOT: return "virtual method installed via explicit override"; case CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE: return "devirtualization crossed version bubble"; case CORINFO_DEVIRTUALIZATION_MULTIPLE_IMPL: return "object class has multiple implementations of interface"; case CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_CLASS_DECL: return "decl method is defined on class and decl method not in version bubble, and decl method not in " "type closest to version bubble"; case CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_INTERFACE_DECL: return "decl method is defined on interface and not in version bubble, and implementation type not " "entirely defined in bubble"; case 
CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_IMPL: return "object class not defined within version bubble"; case CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_IMPL_NOT_REFERENCEABLE: return "object class cannot be referenced from R2R code due to missing tokens"; case CORINFO_DEVIRTUALIZATION_FAILED_DUPLICATE_INTERFACE: return "crossgen2 virtual method algorithm and runtime algorithm differ in the presence of duplicate " "interface implementations"; case CORINFO_DEVIRTUALIZATION_FAILED_DECL_NOT_REPRESENTABLE: return "Decl method cannot be represented in R2R image"; default: return "undefined"; } } #endif // defined(DEBUG) #if TRACK_ENREG_STATS Compiler::EnregisterStats Compiler::s_enregisterStats; void Compiler::EnregisterStats::RecordLocal(const LclVarDsc* varDsc) { m_totalNumberOfVars++; if (varDsc->TypeGet() == TYP_STRUCT) { m_totalNumberOfStructVars++; } if (!varDsc->lvDoNotEnregister) { m_totalNumberOfEnregVars++; if (varDsc->TypeGet() == TYP_STRUCT) { m_totalNumberOfStructEnregVars++; } } else { switch (varDsc->GetDoNotEnregReason()) { case DoNotEnregisterReason::AddrExposed: m_addrExposed++; break; case DoNotEnregisterReason::HiddenBufferStructArg: m_hiddenStructArg++; break; case DoNotEnregisterReason::DontEnregStructs: m_dontEnregStructs++; break; case DoNotEnregisterReason::NotRegSizeStruct: m_notRegSizeStruct++; break; case DoNotEnregisterReason::LocalField: m_localField++; break; case DoNotEnregisterReason::VMNeedsStackAddr: m_VMNeedsStackAddr++; break; case DoNotEnregisterReason::LiveInOutOfHandler: m_liveInOutHndlr++; break; case DoNotEnregisterReason::BlockOp: m_blockOp++; break; case DoNotEnregisterReason::IsStructArg: m_structArg++; break; case DoNotEnregisterReason::DepField: m_depField++; break; case DoNotEnregisterReason::NoRegVars: m_noRegVars++; break; case DoNotEnregisterReason::MinOptsGC: m_minOptsGC++; break; #if !defined(TARGET_64BIT) case DoNotEnregisterReason::LongParamField: m_longParamField++; break; #endif #ifdef JIT32_GCENCODER case DoNotEnregisterReason::PinningRef: m_PinningRef++; break; #endif case DoNotEnregisterReason::LclAddrNode: m_lclAddrNode++; break; case DoNotEnregisterReason::CastTakesAddr: m_castTakesAddr++; break; case DoNotEnregisterReason::StoreBlkSrc: m_storeBlkSrc++; break; case DoNotEnregisterReason::OneAsgRetyping: m_oneAsgRetyping++; break; case DoNotEnregisterReason::SwizzleArg: m_swizzleArg++; break; case DoNotEnregisterReason::BlockOpRet: m_blockOpRet++; break; case DoNotEnregisterReason::ReturnSpCheck: m_returnSpCheck++; break; case DoNotEnregisterReason::SimdUserForcesDep: m_simdUserForcesDep++; break; default: unreached(); break; } if (varDsc->GetDoNotEnregReason() == DoNotEnregisterReason::AddrExposed) { // We can't `assert(IsAddressExposed())` because `fgAdjustForAddressExposedOrWrittenThis` // does not clear `m_doNotEnregReason` on `this`. 
switch (varDsc->GetAddrExposedReason()) { case AddressExposedReason::PARENT_EXPOSED: m_parentExposed++; break; case AddressExposedReason::TOO_CONSERVATIVE: m_tooConservative++; break; case AddressExposedReason::ESCAPE_ADDRESS: m_escapeAddress++; break; case AddressExposedReason::WIDE_INDIR: m_wideIndir++; break; case AddressExposedReason::OSR_EXPOSED: m_osrExposed++; break; case AddressExposedReason::STRESS_LCL_FLD: m_stressLclFld++; break; case AddressExposedReason::COPY_FLD_BY_FLD: m_copyFldByFld++; break; case AddressExposedReason::DISPATCH_RET_BUF: m_dispatchRetBuf++; break; default: unreached(); break; } } } } void Compiler::EnregisterStats::Dump(FILE* fout) const { const unsigned totalNumberOfNotStructVars = s_enregisterStats.m_totalNumberOfVars - s_enregisterStats.m_totalNumberOfStructVars; const unsigned totalNumberOfNotStructEnregVars = s_enregisterStats.m_totalNumberOfEnregVars - s_enregisterStats.m_totalNumberOfStructEnregVars; const unsigned notEnreg = s_enregisterStats.m_totalNumberOfVars - s_enregisterStats.m_totalNumberOfEnregVars; fprintf(fout, "\nLocals enregistration statistics:\n"); if (m_totalNumberOfVars == 0) { fprintf(fout, "No locals to report.\n"); return; } fprintf(fout, "total number of locals: %d, number of enregistered: %d, notEnreg: %d, ratio: %.2f\n", m_totalNumberOfVars, m_totalNumberOfEnregVars, m_totalNumberOfVars - m_totalNumberOfEnregVars, (float)m_totalNumberOfEnregVars / m_totalNumberOfVars); if (m_totalNumberOfStructVars != 0) { fprintf(fout, "total number of struct locals: %d, number of enregistered: %d, notEnreg: %d, ratio: %.2f\n", m_totalNumberOfStructVars, m_totalNumberOfStructEnregVars, m_totalNumberOfStructVars - m_totalNumberOfStructEnregVars, (float)m_totalNumberOfStructEnregVars / m_totalNumberOfStructVars); } const unsigned numberOfPrimitiveLocals = totalNumberOfNotStructVars - totalNumberOfNotStructEnregVars; if (numberOfPrimitiveLocals != 0) { fprintf(fout, "total number of primitive locals: %d, number of enregistered: %d, notEnreg: %d, ratio: %.2f\n", totalNumberOfNotStructVars, totalNumberOfNotStructEnregVars, numberOfPrimitiveLocals, (float)totalNumberOfNotStructEnregVars / totalNumberOfNotStructVars); } if (notEnreg == 0) { fprintf(fout, "All locals are enregistered.\n"); return; } #define PRINT_STATS(stat, total) \ if (stat != 0) \ { \ fprintf(fout, #stat " %d, ratio: %.2f\n", stat, (float)stat / total); \ } PRINT_STATS(m_addrExposed, notEnreg); PRINT_STATS(m_hiddenStructArg, notEnreg); PRINT_STATS(m_dontEnregStructs, notEnreg); PRINT_STATS(m_notRegSizeStruct, notEnreg); PRINT_STATS(m_localField, notEnreg); PRINT_STATS(m_VMNeedsStackAddr, notEnreg); PRINT_STATS(m_liveInOutHndlr, notEnreg); PRINT_STATS(m_blockOp, notEnreg); PRINT_STATS(m_structArg, notEnreg); PRINT_STATS(m_depField, notEnreg); PRINT_STATS(m_noRegVars, notEnreg); PRINT_STATS(m_minOptsGC, notEnreg); #if !defined(TARGET_64BIT) PRINT_STATS(m_longParamField, notEnreg); #endif // !TARGET_64BIT #ifdef JIT32_GCENCODER PRINT_STATS(m_PinningRef, notEnreg); #endif // JIT32_GCENCODER PRINT_STATS(m_lclAddrNode, notEnreg); PRINT_STATS(m_castTakesAddr, notEnreg); PRINT_STATS(m_storeBlkSrc, notEnreg); PRINT_STATS(m_oneAsgRetyping, notEnreg); PRINT_STATS(m_swizzleArg, notEnreg); PRINT_STATS(m_blockOpRet, notEnreg); PRINT_STATS(m_returnSpCheck, notEnreg); PRINT_STATS(m_simdUserForcesDep, notEnreg); fprintf(fout, "\nAddr exposed details:\n"); if (m_addrExposed == 0) { fprintf(fout, "\nNo address exposed locals to report.\n"); return; } PRINT_STATS(m_parentExposed, m_addrExposed); 
PRINT_STATS(m_tooConservative, m_addrExposed); PRINT_STATS(m_escapeAddress, m_addrExposed); PRINT_STATS(m_wideIndir, m_addrExposed); PRINT_STATS(m_osrExposed, m_addrExposed); PRINT_STATS(m_stressLclFld, m_addrExposed); PRINT_STATS(m_copyFldByFld, m_addrExposed); PRINT_STATS(m_dispatchRetBuf, m_addrExposed); } #endif // TRACK_ENREG_STATS
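// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the JIT sources above): the phase-timing
// attribution performed by JitTimer::EndPhase can be hard to follow, so the
// minimal, self-contained example below restates the rule it implements:
// leaf phases accumulate their own cycles and also credit every ancestor
// phase, while phases that have children only accumulate "end phase slop".
// All names here (SketchPhaseTimer, phaseParent, phaseHasChildren, ...) are
// hypothetical and exist only for this sketch; they mirror, but are not, the
// real PhaseParent[] / PhaseHasChildren[] tables used by JitTimer.
// ---------------------------------------------------------------------------
#include <cstdint>

struct SketchPhaseTimer
{
    static const int kNumPhases = 3;

    // Assume phase 2 is the parent of phases 0 and 1.
    int      phaseParent[kNumPhases]      = {2, 2, -1};
    bool     phaseHasChildren[kNumPhases] = {false, false, true};

    uint64_t cyclesByPhase[kNumPhases] = {};
    uint64_t parentPhaseEndSlop        = 0;

    // Credit 'cycles' to a phase that just ended.
    void EndPhase(int phase, uint64_t cycles)
    {
        if (phaseHasChildren[phase])
        {
            // Non-leaf phase: its children were already credited, so any
            // remaining time is unattributed slop (expected to stay tiny).
            parentPhaseEndSlop += cycles;
        }
        else
        {
            // Leaf phase: credit the phase itself and all of its ancestors.
            cyclesByPhase[phase] += cycles;
            for (int anc = phaseParent[phase]; anc != -1; anc = phaseParent[anc])
            {
                cyclesByPhase[anc] += cycles;
            }
        }
    }
};

// Usage of the sketch: EndPhase(0, 100) and EndPhase(1, 50) leave
// cyclesByPhase[2] == 150, while EndPhase(2, 5) only adds 5 to
// parentPhaseEndSlop - matching how the reports above expect parent-phase
// slop to be a small fraction of total time.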
1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/coreclr/jit/compiler.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Compiler XX XX XX XX Represents the method data we are currently JIT-compiling. XX XX An instance of this class is created for every method we JIT. XX XX This contains all the info needed for the method. So allocating a XX XX a new instance per method makes it thread-safe. XX XX It should be used to do all the memory management for the compiler run. XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ #ifndef _COMPILER_H_ #define _COMPILER_H_ /*****************************************************************************/ #include "jit.h" #include "opcode.h" #include "varset.h" #include "jitstd.h" #include "jithashtable.h" #include "gentree.h" #include "debuginfo.h" #include "lir.h" #include "block.h" #include "inline.h" #include "jiteh.h" #include "instr.h" #include "regalloc.h" #include "sm.h" #include "cycletimer.h" #include "blockset.h" #include "arraystack.h" #include "hashbv.h" #include "jitexpandarray.h" #include "tinyarray.h" #include "valuenum.h" #include "jittelemetry.h" #include "namedintrinsiclist.h" #ifdef LATE_DISASM #include "disasm.h" #endif #include "codegeninterface.h" #include "regset.h" #include "jitgcinfo.h" #if DUMP_GC_TABLES && defined(JIT32_GCENCODER) #include "gcdump.h" #endif #include "emit.h" #include "hwintrinsic.h" #include "simd.h" #include "simdashwintrinsic.h" // This is only used locally in the JIT to indicate that // a verification block should be inserted #define SEH_VERIFICATION_EXCEPTION 0xe0564552 // VER /***************************************************************************** * Forward declarations */ struct InfoHdr; // defined in GCInfo.h struct escapeMapping_t; // defined in fgdiagnostic.cpp class emitter; // defined in emit.h struct ShadowParamVarInfo; // defined in GSChecks.cpp struct InitVarDscInfo; // defined in register_arg_convention.h class FgStack; // defined in fgbasic.cpp class Instrumentor; // defined in fgprofile.cpp class SpanningTreeVisitor; // defined in fgprofile.cpp class CSE_DataFlow; // defined in OptCSE.cpp class OptBoolsDsc; // defined in optimizer.cpp #ifdef DEBUG struct IndentStack; #endif class Lowering; // defined in lower.h // The following are defined in this file, Compiler.h class Compiler; /***************************************************************************** * Unwind info */ #include "unwind.h" /*****************************************************************************/ // // Declare global operator new overloads that use the compiler's arena allocator // // I wanted to make the second argument optional, with default = CMK_Unknown, but that // caused these to be ambiguous with the global placement new operators. void* __cdecl operator new(size_t n, Compiler* context, CompMemKind cmk); void* __cdecl operator new[](size_t n, Compiler* context, CompMemKind cmk); void* __cdecl operator new(size_t n, void* p, const jitstd::placement_t& syntax_difference); // Requires the definitions of "operator new" so including "LoopCloning.h" after the definitions. 
#include "loopcloning.h" /*****************************************************************************/ /* This is included here and not earlier as it needs the definition of "CSE" * which is defined in the section above */ /*****************************************************************************/ unsigned genLog2(unsigned value); unsigned genLog2(unsigned __int64 value); unsigned ReinterpretHexAsDecimal(unsigned in); /*****************************************************************************/ const unsigned FLG_CCTOR = (CORINFO_FLG_CONSTRUCTOR | CORINFO_FLG_STATIC); #ifdef DEBUG const int BAD_STK_OFFS = 0xBAADF00D; // for LclVarDsc::lvStkOffs #endif //------------------------------------------------------------------------ // HFA info shared by LclVarDsc and fgArgTabEntry //------------------------------------------------------------------------ inline bool IsHfa(CorInfoHFAElemType kind) { return kind != CORINFO_HFA_ELEM_NONE; } inline var_types HfaTypeFromElemKind(CorInfoHFAElemType kind) { switch (kind) { case CORINFO_HFA_ELEM_FLOAT: return TYP_FLOAT; case CORINFO_HFA_ELEM_DOUBLE: return TYP_DOUBLE; #ifdef FEATURE_SIMD case CORINFO_HFA_ELEM_VECTOR64: return TYP_SIMD8; case CORINFO_HFA_ELEM_VECTOR128: return TYP_SIMD16; #endif case CORINFO_HFA_ELEM_NONE: return TYP_UNDEF; default: assert(!"Invalid HfaElemKind"); return TYP_UNDEF; } } inline CorInfoHFAElemType HfaElemKindFromType(var_types type) { switch (type) { case TYP_FLOAT: return CORINFO_HFA_ELEM_FLOAT; case TYP_DOUBLE: return CORINFO_HFA_ELEM_DOUBLE; #ifdef FEATURE_SIMD case TYP_SIMD8: return CORINFO_HFA_ELEM_VECTOR64; case TYP_SIMD16: return CORINFO_HFA_ELEM_VECTOR128; #endif case TYP_UNDEF: return CORINFO_HFA_ELEM_NONE; default: assert(!"Invalid HFA Type"); return CORINFO_HFA_ELEM_NONE; } } // The following holds the Local var info (scope information) typedef const char* VarName; // Actual ASCII string struct VarScopeDsc { unsigned vsdVarNum; // (remapped) LclVarDsc number unsigned vsdLVnum; // 'which' in eeGetLVinfo(). // Also, it is the index of this entry in the info.compVarScopes array, // which is useful since the array is also accessed via the // compEnterScopeList and compExitScopeList sorted arrays. IL_OFFSET vsdLifeBeg; // instr offset of beg of life IL_OFFSET vsdLifeEnd; // instr offset of end of life #ifdef DEBUG VarName vsdName; // name of the var #endif }; // This class stores information associated with a LclVar SSA definition. class LclSsaVarDsc { // The basic block where the definition occurs. Definitions of uninitialized variables // are considered to occur at the start of the first basic block (fgFirstBB). // // TODO-Cleanup: In the case of uninitialized variables the block is set to nullptr by // SsaBuilder and changed to fgFirstBB during value numbering. It would be useful to // investigate and perhaps eliminate this rather unexpected behavior. BasicBlock* m_block; // The GT_ASG node that generates the definition, or nullptr for definitions // of uninitialized variables. 
GenTreeOp* m_asg; public: LclSsaVarDsc() : m_block(nullptr), m_asg(nullptr) { } LclSsaVarDsc(BasicBlock* block) : m_block(block), m_asg(nullptr) { } LclSsaVarDsc(BasicBlock* block, GenTreeOp* asg) : m_block(block), m_asg(asg) { assert((asg == nullptr) || asg->OperIs(GT_ASG)); } BasicBlock* GetBlock() const { return m_block; } void SetBlock(BasicBlock* block) { m_block = block; } GenTreeOp* GetAssignment() const { return m_asg; } void SetAssignment(GenTreeOp* asg) { assert((asg == nullptr) || asg->OperIs(GT_ASG)); m_asg = asg; } ValueNumPair m_vnPair; }; // This class stores information associated with a memory SSA definition. class SsaMemDef { public: ValueNumPair m_vnPair; }; //------------------------------------------------------------------------ // SsaDefArray: A resizable array of SSA definitions. // // Unlike an ordinary resizable array implementation, this allows only element // addition (by calling AllocSsaNum) and has special handling for RESERVED_SSA_NUM // (basically it's a 1-based array). The array doesn't impose any particular // requirements on the elements it stores and AllocSsaNum forwards its arguments // to the array element constructor, this way the array supports both LclSsaVarDsc // and SsaMemDef elements. // template <typename T> class SsaDefArray { T* m_array; unsigned m_arraySize; unsigned m_count; static_assert_no_msg(SsaConfig::RESERVED_SSA_NUM == 0); static_assert_no_msg(SsaConfig::FIRST_SSA_NUM == 1); // Get the minimum valid SSA number. unsigned GetMinSsaNum() const { return SsaConfig::FIRST_SSA_NUM; } // Increase (double) the size of the array. void GrowArray(CompAllocator alloc) { unsigned oldSize = m_arraySize; unsigned newSize = max(2, oldSize * 2); T* newArray = alloc.allocate<T>(newSize); for (unsigned i = 0; i < oldSize; i++) { newArray[i] = m_array[i]; } m_array = newArray; m_arraySize = newSize; } public: // Construct an empty SsaDefArray. SsaDefArray() : m_array(nullptr), m_arraySize(0), m_count(0) { } // Reset the array (used only if the SSA form is reconstructed). void Reset() { m_count = 0; } // Allocate a new SSA number (starting with SsaConfig::FIRST_SSA_NUM). template <class... Args> unsigned AllocSsaNum(CompAllocator alloc, Args&&... args) { if (m_count == m_arraySize) { GrowArray(alloc); } unsigned ssaNum = GetMinSsaNum() + m_count; m_array[m_count++] = T(std::forward<Args>(args)...); // Ensure that the first SSA number we allocate is SsaConfig::FIRST_SSA_NUM assert((ssaNum == SsaConfig::FIRST_SSA_NUM) || (m_count > 1)); return ssaNum; } // Get the number of SSA definitions in the array. unsigned GetCount() const { return m_count; } // Get a pointer to the SSA definition at the specified index. T* GetSsaDefByIndex(unsigned index) { assert(index < m_count); return &m_array[index]; } // Check if the specified SSA number is valid. bool IsValidSsaNum(unsigned ssaNum) const { return (GetMinSsaNum() <= ssaNum) && (ssaNum < (GetMinSsaNum() + m_count)); } // Get a pointer to the SSA definition associated with the specified SSA number. T* GetSsaDef(unsigned ssaNum) { assert(ssaNum != SsaConfig::RESERVED_SSA_NUM); return GetSsaDefByIndex(ssaNum - GetMinSsaNum()); } // Get an SSA number associated with the specified SSA def (that must be in this array). 
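    // Usage sketch for this array (illustrative only; "alloc" stands in for an
    // arbitrary CompAllocator and "block" for some BasicBlock*):
    //
    //     SsaDefArray<LclSsaVarDsc> defs;
    //     unsigned ssaNum = defs.AllocSsaNum(alloc, block); // first def gets SsaConfig::FIRST_SSA_NUM (1)
    //     LclSsaVarDsc* def = defs.GetSsaDef(ssaNum);       // the 1-based number maps back to the def
    //     assert(defs.GetSsaNum(def) == ssaNum);            // and GetSsaNum is its inverse
    //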
unsigned GetSsaNum(T* ssaDef) { assert((m_array <= ssaDef) && (ssaDef < &m_array[m_count])); return GetMinSsaNum() + static_cast<unsigned>(ssaDef - &m_array[0]); } }; enum RefCountState { RCS_INVALID, // not valid to get/set ref counts RCS_EARLY, // early counts for struct promotion and struct passing RCS_NORMAL, // normal ref counts (from lvaMarkRefs onward) }; #ifdef DEBUG // Reasons why we can't enregister a local. enum class DoNotEnregisterReason { None, AddrExposed, // the address of this local is exposed. DontEnregStructs, // struct enregistration is disabled. NotRegSizeStruct, // the struct size does not much any register size, usually the struct size is too big. LocalField, // the local is accessed with LCL_FLD, note we can do it not only for struct locals. VMNeedsStackAddr, LiveInOutOfHandler, // the local is alive in and out of exception handler and not signle def. BlockOp, // Is read or written via a block operation. IsStructArg, // Is a struct passed as an argument in a way that requires a stack location. DepField, // It is a field of a dependently promoted struct NoRegVars, // opts.compFlags & CLFLG_REGVAR is not set MinOptsGC, // It is a GC Ref and we are compiling MinOpts #if !defined(TARGET_64BIT) LongParamField, // It is a decomposed field of a long parameter. #endif #ifdef JIT32_GCENCODER PinningRef, #endif LclAddrNode, // the local is accessed with LCL_ADDR_VAR/FLD. CastTakesAddr, StoreBlkSrc, // the local is used as STORE_BLK source. OneAsgRetyping, // fgMorphOneAsgBlockOp prevents this local from being enregister. SwizzleArg, // the local is passed using LCL_FLD as another type. BlockOpRet, // the struct is returned and it promoted or there is a cast. ReturnSpCheck, // the local is used to do SP check SimdUserForcesDep, // a promoted struct was used by a SIMD/HWI node; it must be dependently promoted HiddenBufferStructArg // the argument is a hidden return buffer passed to a method. }; enum class AddressExposedReason { NONE, PARENT_EXPOSED, // This is a promoted field but the parent is exposed. TOO_CONSERVATIVE, // Were marked as exposed to be conservative, fix these places. ESCAPE_ADDRESS, // The address is escaping, for example, passed as call argument. WIDE_INDIR, // We access via indirection with wider type. OSR_EXPOSED, // It was exposed in the original method, osr has to repeat it. STRESS_LCL_FLD, // Stress mode replaces localVar with localFld and makes them addrExposed. COPY_FLD_BY_FLD, // Field by field copy takes the address of the local, can be fixed. DISPATCH_RET_BUF // Caller return buffer dispatch. }; #endif // DEBUG class LclVarDsc { public: // The constructor. Most things can just be zero'ed. // // Initialize the ArgRegs to REG_STK. // Morph will update if this local is passed in a register. LclVarDsc() : _lvArgReg(REG_STK) , #if FEATURE_MULTIREG_ARGS _lvOtherArgReg(REG_STK) , #endif // FEATURE_MULTIREG_ARGS lvClassHnd(NO_CLASS_HANDLE) , lvRefBlks(BlockSetOps::UninitVal()) , lvPerSsaData() { } // note this only packs because var_types is a typedef of unsigned char var_types lvType : 5; // TYP_INT/LONG/FLOAT/DOUBLE/REF unsigned char lvIsParam : 1; // is this a parameter? unsigned char lvIsRegArg : 1; // is this an argument that was passed by register? unsigned char lvFramePointerBased : 1; // 0 = off of REG_SPBASE (e.g., ESP), 1 = off of REG_FPBASE (e.g., EBP) unsigned char lvOnFrame : 1; // (part of) the variable lives on the frame unsigned char lvRegister : 1; // assigned to live in a register? 
For RyuJIT backend, this is only set if the // variable is in the same register for the entire function. unsigned char lvTracked : 1; // is this a tracked variable? bool lvTrackedNonStruct() { return lvTracked && lvType != TYP_STRUCT; } unsigned char lvPinned : 1; // is this a pinned variable? unsigned char lvMustInit : 1; // must be initialized private: bool m_addrExposed : 1; // The address of this variable is "exposed" -- passed as an argument, stored in a // global location, etc. // We cannot reason reliably about the value of the variable. public: unsigned char lvDoNotEnregister : 1; // Do not enregister this variable. unsigned char lvFieldAccessed : 1; // The var is a struct local, and a field of the variable is accessed. Affects // struct promotion. unsigned char lvLiveInOutOfHndlr : 1; // The variable is live in or out of an exception handler, and therefore must // be on the stack (at least at those boundaries.) unsigned char lvInSsa : 1; // The variable is in SSA form (set by SsaBuilder) unsigned char lvIsCSE : 1; // Indicates if this LclVar is a CSE variable. unsigned char lvHasLdAddrOp : 1; // has ldloca or ldarga opcode on this local. unsigned char lvStackByref : 1; // This is a compiler temporary of TYP_BYREF that is known to point into our local // stack frame. unsigned char lvHasILStoreOp : 1; // there is at least one STLOC or STARG on this local unsigned char lvHasMultipleILStoreOp : 1; // there is more than one STLOC on this local unsigned char lvIsTemp : 1; // Short-lifetime compiler temp #if defined(TARGET_AMD64) || defined(TARGET_ARM64) unsigned char lvIsImplicitByRef : 1; // Set if the argument is an implicit byref. #endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) unsigned char lvIsBoolean : 1; // set if variable is boolean unsigned char lvSingleDef : 1; // variable has a single def // before lvaMarkLocalVars: identifies ref type locals that can get type updates // after lvaMarkLocalVars: identifies locals that are suitable for optAddCopies unsigned char lvSingleDefRegCandidate : 1; // variable has a single def and hence is a register candidate // Currently, this is only used to decide if an EH variable can be // a register candiate or not. unsigned char lvDisqualifySingleDefRegCandidate : 1; // tracks variable that are disqualified from register // candidancy unsigned char lvSpillAtSingleDef : 1; // variable has a single def (as determined by LSRA interval scan) // and is spilled making it candidate to spill right after the // first (and only) definition. // Note: We cannot reuse lvSingleDefRegCandidate because it is set // in earlier phase and the information might not be appropriate // in LSRA. unsigned char lvDisqualify : 1; // variable is no longer OK for add copy optimization unsigned char lvVolatileHint : 1; // hint for AssertionProp #ifndef TARGET_64BIT unsigned char lvStructDoubleAlign : 1; // Must we double align this struct? #endif // !TARGET_64BIT #ifdef TARGET_64BIT unsigned char lvQuirkToLong : 1; // Quirk to allocate this LclVar as a 64-bit long #endif #ifdef DEBUG unsigned char lvKeepType : 1; // Don't change the type of this variable unsigned char lvNoLclFldStress : 1; // Can't apply local field stress on this one #endif unsigned char lvIsPtr : 1; // Might this be used in an address computation? (used by buffer overflow security // checks) unsigned char lvIsUnsafeBuffer : 1; // Does this contain an unsafe buffer requiring buffer overflow security checks? 
unsigned char lvPromoted : 1; // True when this local is a promoted struct, a normed struct, or a "split" long on a // 32-bit target. For implicit byref parameters, this gets hijacked between // fgRetypeImplicitByRefArgs and fgMarkDemotedImplicitByRefArgs to indicate whether // references to the arg are being rewritten as references to a promoted shadow local. unsigned char lvIsStructField : 1; // Is this local var a field of a promoted struct local? unsigned char lvOverlappingFields : 1; // True when we have a struct with possibly overlapping fields unsigned char lvContainsHoles : 1; // True when we have a promoted struct that contains holes unsigned char lvCustomLayout : 1; // True when this struct has "CustomLayout" unsigned char lvIsMultiRegArg : 1; // true if this is a multireg LclVar struct used in an argument context unsigned char lvIsMultiRegRet : 1; // true if this is a multireg LclVar struct assigned from a multireg call #ifdef DEBUG unsigned char lvHiddenBufferStructArg : 1; // True when this struct (or its field) are passed as hidden buffer // pointer. #endif #ifdef FEATURE_HFA_FIELDS_PRESENT CorInfoHFAElemType _lvHfaElemKind : 3; // What kind of an HFA this is (CORINFO_HFA_ELEM_NONE if it is not an HFA). #endif // FEATURE_HFA_FIELDS_PRESENT #ifdef DEBUG // TODO-Cleanup: See the note on lvSize() - this flag is only in use by asserts that are checking for struct // types, and is needed because of cases where TYP_STRUCT is bashed to an integral type. // Consider cleaning this up so this workaround is not required. unsigned char lvUnusedStruct : 1; // All references to this promoted struct are through its field locals. // I.e. there is no longer any reference to the struct directly. // In this case we can simply remove this struct local. unsigned char lvUndoneStructPromotion : 1; // The struct promotion was undone and hence there should be no // reference to the fields of this struct. #endif unsigned char lvLRACandidate : 1; // Tracked for linear scan register allocation purposes #ifdef FEATURE_SIMD // Note that both SIMD vector args and locals are marked as lvSIMDType = true, but the // type of an arg node is TYP_BYREF and a local node is TYP_SIMD*. unsigned char lvSIMDType : 1; // This is a SIMD struct unsigned char lvUsedInSIMDIntrinsic : 1; // This tells lclvar is used for simd intrinsic unsigned char lvSimdBaseJitType : 5; // Note: this only packs because CorInfoType has less than 32 entries CorInfoType GetSimdBaseJitType() const { return (CorInfoType)lvSimdBaseJitType; } void SetSimdBaseJitType(CorInfoType simdBaseJitType) { assert(simdBaseJitType < (1 << 5)); lvSimdBaseJitType = (unsigned char)simdBaseJitType; } var_types GetSimdBaseType() const; #endif // FEATURE_SIMD unsigned char lvRegStruct : 1; // This is a reg-sized non-field-addressed struct. unsigned char lvClassIsExact : 1; // lvClassHandle is the exact type #ifdef DEBUG unsigned char lvClassInfoUpdated : 1; // true if this var has updated class handle or exactness #endif unsigned char lvImplicitlyReferenced : 1; // true if there are non-IR references to this local (prolog, epilog, gc, // eh) unsigned char lvSuppressedZeroInit : 1; // local needs zero init if we transform tail call to loop unsigned char lvHasExplicitInit : 1; // The local is explicitly initialized and doesn't need zero initialization in // the prolog. If the local has gc pointers, there are no gc-safe points // between the prolog and the explicit initialization. 
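    // Illustrative sketch of how the promotion fields relate (the local numbers
    // below are hypothetical, not produced by any particular compilation): for
    //
    //     struct Point { int x; int y; };   // local V01 of type Point
    //
    // promoting V01 creates field locals, say V05 for V01.x and V06 for V01.y.
    // V01 then has lvPromoted set, with lvFieldLclStart == 5 and lvFieldCnt == 2,
    // while V05 and V06 have lvIsStructField set, lvParentLcl == 1, and
    // lvFldOffset of 0 and 4 respectively.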
union { unsigned lvFieldLclStart; // The index of the local var representing the first field in the promoted struct // local. For implicit byref parameters, this gets hijacked between // fgRetypeImplicitByRefArgs and fgMarkDemotedImplicitByRefArgs to point to the // struct local created to model the parameter's struct promotion, if any. unsigned lvParentLcl; // The index of the local var representing the parent (i.e. the promoted struct local). // Valid on promoted struct local fields. }; unsigned char lvFieldCnt; // Number of fields in the promoted VarDsc. unsigned char lvFldOffset; unsigned char lvFldOrdinal; #ifdef DEBUG unsigned char lvSingleDefDisqualifyReason = 'H'; #endif #if FEATURE_MULTIREG_ARGS regNumber lvRegNumForSlot(unsigned slotNum) { if (slotNum == 0) { return (regNumber)_lvArgReg; } else if (slotNum == 1) { return GetOtherArgReg(); } else { assert(false && "Invalid slotNum!"); } unreached(); } #endif // FEATURE_MULTIREG_ARGS CorInfoHFAElemType GetLvHfaElemKind() const { #ifdef FEATURE_HFA_FIELDS_PRESENT return _lvHfaElemKind; #else NOWAY_MSG("GetLvHfaElemKind"); return CORINFO_HFA_ELEM_NONE; #endif // FEATURE_HFA_FIELDS_PRESENT } void SetLvHfaElemKind(CorInfoHFAElemType elemKind) { #ifdef FEATURE_HFA_FIELDS_PRESENT _lvHfaElemKind = elemKind; #else NOWAY_MSG("SetLvHfaElemKind"); #endif // FEATURE_HFA_FIELDS_PRESENT } bool lvIsHfa() const { if (GlobalJitOptions::compFeatureHfa) { return IsHfa(GetLvHfaElemKind()); } else { return false; } } bool lvIsHfaRegArg() const { if (GlobalJitOptions::compFeatureHfa) { return lvIsRegArg && lvIsHfa(); } else { return false; } } //------------------------------------------------------------------------------ // lvHfaSlots: Get the number of slots used by an HFA local // // Return Value: // On Arm64 - Returns 1-4 indicating the number of register slots used by the HFA // On Arm32 - Returns the total number of single FP register slots used by the HFA, max is 8 // unsigned lvHfaSlots() const { assert(lvIsHfa()); assert(varTypeIsStruct(lvType)); unsigned slots = 0; #ifdef TARGET_ARM slots = lvExactSize / sizeof(float); assert(slots <= 8); #elif defined(TARGET_ARM64) switch (GetLvHfaElemKind()) { case CORINFO_HFA_ELEM_NONE: assert(!"lvHfaSlots called for non-HFA"); break; case CORINFO_HFA_ELEM_FLOAT: assert((lvExactSize % 4) == 0); slots = lvExactSize >> 2; break; case CORINFO_HFA_ELEM_DOUBLE: case CORINFO_HFA_ELEM_VECTOR64: assert((lvExactSize % 8) == 0); slots = lvExactSize >> 3; break; case CORINFO_HFA_ELEM_VECTOR128: assert((lvExactSize % 16) == 0); slots = lvExactSize >> 4; break; default: unreached(); } assert(slots <= 4); #endif // TARGET_ARM64 return slots; } // lvIsMultiRegArgOrRet() // returns true if this is a multireg LclVar struct used in an argument context // or if this is a multireg LclVar struct assigned from a multireg call bool lvIsMultiRegArgOrRet() { return lvIsMultiRegArg || lvIsMultiRegRet; } #if defined(DEBUG) private: DoNotEnregisterReason m_doNotEnregReason; AddressExposedReason m_addrExposedReason; public: void SetDoNotEnregReason(DoNotEnregisterReason reason) { m_doNotEnregReason = reason; } DoNotEnregisterReason GetDoNotEnregReason() const { return m_doNotEnregReason; } AddressExposedReason GetAddrExposedReason() const { return m_addrExposedReason; } #endif // DEBUG public: void SetAddressExposed(bool value DEBUGARG(AddressExposedReason reason)) { m_addrExposed = value; INDEBUG(m_addrExposedReason = reason); } void CleanAddressExposed() { m_addrExposed = false; } bool IsAddressExposed() const { return 
m_addrExposed; } #ifdef DEBUG void SetHiddenBufferStructArg(char value) { lvHiddenBufferStructArg = value; } bool IsHiddenBufferStructArg() const { return lvHiddenBufferStructArg; } #endif private: regNumberSmall _lvRegNum; // Used to store the register this variable is in (or, the low register of a // register pair). It is set during codegen any time the // variable is enregistered (lvRegister is only set // to non-zero if the variable gets the same register assignment for its entire // lifetime). #if !defined(TARGET_64BIT) regNumberSmall _lvOtherReg; // Used for "upper half" of long var. #endif // !defined(TARGET_64BIT) regNumberSmall _lvArgReg; // The (first) register in which this argument is passed. #if FEATURE_MULTIREG_ARGS regNumberSmall _lvOtherArgReg; // Used for the second part of the struct passed in a register. // Note this is defined but not used by ARM32 #endif // FEATURE_MULTIREG_ARGS regNumberSmall _lvArgInitReg; // the register into which the argument is moved at entry public: // The register number is stored in a small format (8 bits), but the getters return and the setters take // a full-size (unsigned) format, to localize the casts here. ///////////////////// regNumber GetRegNum() const { return (regNumber)_lvRegNum; } void SetRegNum(regNumber reg) { _lvRegNum = (regNumberSmall)reg; assert(_lvRegNum == reg); } ///////////////////// #if defined(TARGET_64BIT) regNumber GetOtherReg() const { assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072 // "unreachable code" warnings return REG_NA; } void SetOtherReg(regNumber reg) { assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072 // "unreachable code" warnings } #else // !TARGET_64BIT regNumber GetOtherReg() const { return (regNumber)_lvOtherReg; } void SetOtherReg(regNumber reg) { _lvOtherReg = (regNumberSmall)reg; assert(_lvOtherReg == reg); } #endif // !TARGET_64BIT ///////////////////// regNumber GetArgReg() const { return (regNumber)_lvArgReg; } void SetArgReg(regNumber reg) { _lvArgReg = (regNumberSmall)reg; assert(_lvArgReg == reg); } #if FEATURE_MULTIREG_ARGS regNumber GetOtherArgReg() const { return (regNumber)_lvOtherArgReg; } void SetOtherArgReg(regNumber reg) { _lvOtherArgReg = (regNumberSmall)reg; assert(_lvOtherArgReg == reg); } #endif // FEATURE_MULTIREG_ARGS #ifdef FEATURE_SIMD // Is this is a SIMD struct? bool lvIsSIMDType() const { return lvSIMDType; } // Is this is a SIMD struct which is used for SIMD intrinsic? bool lvIsUsedInSIMDIntrinsic() const { return lvUsedInSIMDIntrinsic; } #else // If feature_simd not enabled, return false bool lvIsSIMDType() const { return false; } bool lvIsUsedInSIMDIntrinsic() const { return false; } #endif ///////////////////// regNumber GetArgInitReg() const { return (regNumber)_lvArgInitReg; } void SetArgInitReg(regNumber reg) { _lvArgInitReg = (regNumberSmall)reg; assert(_lvArgInitReg == reg); } ///////////////////// bool lvIsRegCandidate() const { return lvLRACandidate != 0; } bool lvIsInReg() const { return lvIsRegCandidate() && (GetRegNum() != REG_STK); } regMaskTP lvRegMask() const { regMaskTP regMask = RBM_NONE; if (varTypeUsesFloatReg(TypeGet())) { if (GetRegNum() != REG_STK) { regMask = genRegMaskFloat(GetRegNum(), TypeGet()); } } else { if (GetRegNum() != REG_STK) { regMask = genRegMask(GetRegNum()); } } return regMask; } unsigned short lvVarIndex; // variable tracking index private: unsigned short m_lvRefCnt; // unweighted (real) reference count. 
For implicit by reference // parameters, this gets hijacked from fgResetImplicitByRefRefCount // through fgMarkDemotedImplicitByRefArgs, to provide a static // appearance count (computed during address-exposed analysis) // that fgMakeOutgoingStructArgCopy consults during global morph // to determine if eliding its copy is legal. weight_t m_lvRefCntWtd; // weighted reference count public: unsigned short lvRefCnt(RefCountState state = RCS_NORMAL) const; void incLvRefCnt(unsigned short delta, RefCountState state = RCS_NORMAL); void setLvRefCnt(unsigned short newValue, RefCountState state = RCS_NORMAL); weight_t lvRefCntWtd(RefCountState state = RCS_NORMAL) const; void incLvRefCntWtd(weight_t delta, RefCountState state = RCS_NORMAL); void setLvRefCntWtd(weight_t newValue, RefCountState state = RCS_NORMAL); private: int lvStkOffs; // stack offset of home in bytes. public: int GetStackOffset() const { return lvStkOffs; } void SetStackOffset(int offset) { lvStkOffs = offset; } unsigned lvExactSize; // (exact) size of the type in bytes // Is this a promoted struct? // This method returns true only for structs (including SIMD structs), not for // locals that are split on a 32-bit target. // It is only necessary to use this: // 1) if only structs are wanted, and // 2) if Lowering has already been done. // Otherwise lvPromoted is valid. bool lvPromotedStruct() { #if !defined(TARGET_64BIT) return (lvPromoted && !varTypeIsLong(lvType)); #else // defined(TARGET_64BIT) return lvPromoted; #endif // defined(TARGET_64BIT) } unsigned lvSize() const; size_t lvArgStackSize() const; unsigned lvSlotNum; // original slot # (if remapped) typeInfo lvVerTypeInfo; // type info needed for verification // class handle for the local or null if not known or not a class, // for a struct handle use `GetStructHnd()`. CORINFO_CLASS_HANDLE lvClassHnd; // Get class handle for a struct local or implicitByRef struct local. CORINFO_CLASS_HANDLE GetStructHnd() const { #ifdef FEATURE_SIMD if (lvSIMDType && (m_layout == nullptr)) { return NO_CLASS_HANDLE; } #endif assert(m_layout != nullptr); #if defined(TARGET_AMD64) || defined(TARGET_ARM64) assert(varTypeIsStruct(TypeGet()) || (lvIsImplicitByRef && (TypeGet() == TYP_BYREF))); #else assert(varTypeIsStruct(TypeGet())); #endif CORINFO_CLASS_HANDLE structHnd = m_layout->GetClassHandle(); assert(structHnd != NO_CLASS_HANDLE); return structHnd; } CORINFO_FIELD_HANDLE lvFieldHnd; // field handle for promoted struct fields private: ClassLayout* m_layout; // layout info for structs public: BlockSet lvRefBlks; // Set of blocks that contain refs Statement* lvDefStmt; // Pointer to the statement with the single definition void lvaDisqualifyVar(); // Call to disqualify a local variable from use in optAddCopies var_types TypeGet() const { return (var_types)lvType; } bool lvStackAligned() const { assert(lvIsStructField); return ((lvFldOffset % TARGET_POINTER_SIZE) == 0); } bool lvNormalizeOnLoad() const { return varTypeIsSmall(TypeGet()) && // lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore. (lvIsParam || m_addrExposed || lvIsStructField); } bool lvNormalizeOnStore() const { return varTypeIsSmall(TypeGet()) && // lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore. 
!(lvIsParam || m_addrExposed || lvIsStructField); } void incRefCnts(weight_t weight, Compiler* pComp, RefCountState state = RCS_NORMAL, bool propagate = true); var_types GetHfaType() const { if (GlobalJitOptions::compFeatureHfa) { assert(lvIsHfa()); return HfaTypeFromElemKind(GetLvHfaElemKind()); } else { return TYP_UNDEF; } } void SetHfaType(var_types type) { if (GlobalJitOptions::compFeatureHfa) { CorInfoHFAElemType elemKind = HfaElemKindFromType(type); SetLvHfaElemKind(elemKind); // Ensure we've allocated enough bits. assert(GetLvHfaElemKind() == elemKind); } } // Returns true if this variable contains GC pointers (including being a GC pointer itself). bool HasGCPtr() const { return varTypeIsGC(lvType) || ((lvType == TYP_STRUCT) && m_layout->HasGCPtr()); } // Returns the layout of a struct variable. ClassLayout* GetLayout() const { assert(varTypeIsStruct(lvType)); return m_layout; } // Sets the layout of a struct variable. void SetLayout(ClassLayout* layout) { assert(varTypeIsStruct(lvType)); assert((m_layout == nullptr) || ClassLayout::AreCompatible(m_layout, layout)); m_layout = layout; } SsaDefArray<LclSsaVarDsc> lvPerSsaData; // Returns the address of the per-Ssa data for the given ssaNum (which is required // not to be the SsaConfig::RESERVED_SSA_NUM, which indicates that the variable is // not an SSA variable). LclSsaVarDsc* GetPerSsaData(unsigned ssaNum) { return lvPerSsaData.GetSsaDef(ssaNum); } // Returns the SSA number for "ssaDef". Requires "ssaDef" to be a valid definition // of this variable. unsigned GetSsaNumForSsaDef(LclSsaVarDsc* ssaDef) { return lvPerSsaData.GetSsaNum(ssaDef); } var_types GetRegisterType(const GenTreeLclVarCommon* tree) const; var_types GetRegisterType() const; var_types GetActualRegisterType() const; bool IsEnregisterableType() const { return GetRegisterType() != TYP_UNDEF; } bool IsEnregisterableLcl() const { if (lvDoNotEnregister) { return false; } return IsEnregisterableType(); } //----------------------------------------------------------------------------- // IsAlwaysAliveInMemory: Determines if this variable's value is always // up-to-date on stack. This is possible if this is an EH-var or // we decided to spill after single-def. // bool IsAlwaysAliveInMemory() const { return lvLiveInOutOfHndlr || lvSpillAtSingleDef; } bool CanBeReplacedWithItsField(Compiler* comp) const; #ifdef DEBUG public: const char* lvReason; void PrintVarReg() const { printf("%s", getRegName(GetRegNum())); } #endif // DEBUG }; // class LclVarDsc enum class SymbolicIntegerValue : int32_t { LongMin, IntMin, ShortMin, ByteMin, Zero, One, ByteMax, UByteMax, ShortMax, UShortMax, IntMax, UIntMax, LongMax, }; inline constexpr bool operator>(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) > static_cast<int32_t>(right); } inline constexpr bool operator>=(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) >= static_cast<int32_t>(right); } inline constexpr bool operator<(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) < static_cast<int32_t>(right); } inline constexpr bool operator<=(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) <= static_cast<int32_t>(right); } // Represents an integral range useful for reasoning about integral casts. // It uses a symbolic representation for lower and upper bounds so // that it can efficiently handle integers of all sizes on all hosts. 
// // Note that the ranges represented by this class are **always** in the // "signed" domain. This is so that if we know the range a node produces, it // can be trivially used to determine if a cast above the node does or does not // overflow, which requires that the interpretation of integers be the same both // for the "input" and "output". We choose signed interpretation here because it // produces nice continuous ranges and because IR uses sign-extension for constants. // // Some examples of how ranges are computed for casts: // 1. CAST_OVF(ubyte <- uint): does not overflow for [0..UBYTE_MAX], produces the // same range - all casts that do not change the representation, i. e. have the same // "actual" input and output type, have the same "input" and "output" range. // 2. CAST_OVF(ulong <- uint): never oveflows => the "input" range is [INT_MIN..INT_MAX] // (aka all possible 32 bit integers). Produces [0..UINT_MAX] (aka all possible 32 // bit integers zero-extended to 64 bits). // 3. CAST_OVF(int <- uint): overflows for inputs larger than INT_MAX <=> less than 0 // when interpreting as signed => the "input" range is [0..INT_MAX], the same range // being the produced one as the node does not change the width of the integer. // class IntegralRange { private: SymbolicIntegerValue m_lowerBound; SymbolicIntegerValue m_upperBound; public: IntegralRange() = default; IntegralRange(SymbolicIntegerValue lowerBound, SymbolicIntegerValue upperBound) : m_lowerBound(lowerBound), m_upperBound(upperBound) { assert(lowerBound <= upperBound); } bool Contains(int64_t value) const; bool Contains(IntegralRange other) const { return (m_lowerBound <= other.m_lowerBound) && (other.m_upperBound <= m_upperBound); } bool IsPositive() { return m_lowerBound >= SymbolicIntegerValue::Zero; } bool Equals(IntegralRange other) const { return (m_lowerBound == other.m_lowerBound) && (m_upperBound == other.m_upperBound); } static int64_t SymbolicToRealValue(SymbolicIntegerValue value); static SymbolicIntegerValue LowerBoundForType(var_types type); static SymbolicIntegerValue UpperBoundForType(var_types type); static IntegralRange ForType(var_types type) { return {LowerBoundForType(type), UpperBoundForType(type)}; } static IntegralRange ForNode(GenTree* node, Compiler* compiler); static IntegralRange ForCastInput(GenTreeCast* cast); static IntegralRange ForCastOutput(GenTreeCast* cast); #ifdef DEBUG static void Print(IntegralRange range); #endif // DEBUG }; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX TempsInfo XX XX XX XX The temporary lclVars allocated by the compiler for code generation XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /***************************************************************************** * * The following keeps track of temporaries allocated in the stack frame * during code-generation (after register allocation). These spill-temps are * only used if we run out of registers while evaluating a tree. * * These are different from the more common temps allocated by lvaGrabTemp(). 
*/ class TempDsc { public: TempDsc* tdNext; private: int tdOffs; #ifdef DEBUG static const int BAD_TEMP_OFFSET = 0xDDDDDDDD; // used as a sentinel "bad value" for tdOffs in DEBUG #endif // DEBUG int tdNum; BYTE tdSize; var_types tdType; public: TempDsc(int _tdNum, unsigned _tdSize, var_types _tdType) : tdNum(_tdNum), tdSize((BYTE)_tdSize), tdType(_tdType) { #ifdef DEBUG // temps must have a negative number (so they have a different number from all local variables) assert(tdNum < 0); tdOffs = BAD_TEMP_OFFSET; #endif // DEBUG if (tdNum != _tdNum) { IMPL_LIMITATION("too many spill temps"); } } #ifdef DEBUG bool tdLegalOffset() const { return tdOffs != BAD_TEMP_OFFSET; } #endif // DEBUG int tdTempOffs() const { assert(tdLegalOffset()); return tdOffs; } void tdSetTempOffs(int offs) { tdOffs = offs; assert(tdLegalOffset()); } void tdAdjustTempOffs(int offs) { tdOffs += offs; assert(tdLegalOffset()); } int tdTempNum() const { assert(tdNum < 0); return tdNum; } unsigned tdTempSize() const { return tdSize; } var_types tdTempType() const { return tdType; } }; // interface to hide linearscan implementation from rest of compiler class LinearScanInterface { public: virtual void doLinearScan() = 0; virtual void recordVarLocationsAtStartOfBB(BasicBlock* bb) = 0; virtual bool willEnregisterLocalVars() const = 0; #if TRACK_LSRA_STATS virtual void dumpLsraStatsCsv(FILE* file) = 0; virtual void dumpLsraStatsSummary(FILE* file) = 0; #endif // TRACK_LSRA_STATS }; LinearScanInterface* getLinearScanAllocator(Compiler* comp); // Information about arrays: their element type and size, and the offset of the first element. // We label GT_IND's that are array indices with GTF_IND_ARR_INDEX, and, for such nodes, // associate an array info via the map retrieved by GetArrayInfoMap(). This information is used, // for example, in value numbering of array index expressions. struct ArrayInfo { var_types m_elemType; CORINFO_CLASS_HANDLE m_elemStructType; unsigned m_elemSize; unsigned m_elemOffset; ArrayInfo() : m_elemType(TYP_UNDEF), m_elemStructType(nullptr), m_elemSize(0), m_elemOffset(0) { } ArrayInfo(var_types elemType, unsigned elemSize, unsigned elemOffset, CORINFO_CLASS_HANDLE elemStructType) : m_elemType(elemType), m_elemStructType(elemStructType), m_elemSize(elemSize), m_elemOffset(elemOffset) { } }; // This enumeration names the phases into which we divide compilation. The phases should completely // partition a compilation. enum Phases { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) enum_nm, #include "compphases.h" PHASE_NUMBER_OF }; extern const char* PhaseNames[]; extern const char* PhaseEnums[]; extern const LPCWSTR PhaseShortNames[]; // Specify which checks should be run after each phase // enum class PhaseChecks { CHECK_NONE, CHECK_ALL }; // Specify compiler data that a phase might modify enum class PhaseStatus : unsigned { MODIFIED_NOTHING, MODIFIED_EVERYTHING }; // The following enum provides a simple 1:1 mapping to CLR API's enum API_ICorJitInfo_Names { #define DEF_CLR_API(name) API_##name, #include "ICorJitInfo_API_names.h" API_COUNT }; //--------------------------------------------------------------- // Compilation time. // // A "CompTimeInfo" is a structure for tracking the compilation time of one or more methods. // We divide a compilation into a sequence of contiguous phases, and track the total (per-thread) cycles // of the compilation, as well as the cycles for each phase. We also track the number of bytecodes. 
// If there is a failure in reading a timer at any point, the "CompTimeInfo" becomes invalid, as indicated // by "m_timerFailure" being true. // If FEATURE_JIT_METHOD_PERF is not set, we define a minimal form of this, enough to let other code compile. struct CompTimeInfo { #ifdef FEATURE_JIT_METHOD_PERF // The string names of the phases. static const char* PhaseNames[]; static bool PhaseHasChildren[]; static int PhaseParent[]; static bool PhaseReportsIRSize[]; unsigned m_byteCodeBytes; unsigned __int64 m_totalCycles; unsigned __int64 m_invokesByPhase[PHASE_NUMBER_OF]; unsigned __int64 m_cyclesByPhase[PHASE_NUMBER_OF]; #if MEASURE_CLRAPI_CALLS unsigned __int64 m_CLRinvokesByPhase[PHASE_NUMBER_OF]; unsigned __int64 m_CLRcyclesByPhase[PHASE_NUMBER_OF]; #endif unsigned m_nodeCountAfterPhase[PHASE_NUMBER_OF]; // For better documentation, we call EndPhase on // non-leaf phases. We should also call EndPhase on the // last leaf subphase; obviously, the elapsed cycles between the EndPhase // for the last leaf subphase and the EndPhase for an ancestor should be very small. // We add all such "redundant end phase" intervals to this variable below; we print // it out in a report, so we can verify that it is, indeed, very small. If it ever // isn't, this means that we're doing something significant between the end of the last // declared subphase and the end of its parent. unsigned __int64 m_parentPhaseEndSlop; bool m_timerFailure; #if MEASURE_CLRAPI_CALLS // The following measures the time spent inside each individual CLR API call. unsigned m_allClrAPIcalls; unsigned m_perClrAPIcalls[API_ICorJitInfo_Names::API_COUNT]; unsigned __int64 m_allClrAPIcycles; unsigned __int64 m_perClrAPIcycles[API_ICorJitInfo_Names::API_COUNT]; unsigned __int32 m_maxClrAPIcycles[API_ICorJitInfo_Names::API_COUNT]; #endif // MEASURE_CLRAPI_CALLS CompTimeInfo(unsigned byteCodeBytes); #endif }; #ifdef FEATURE_JIT_METHOD_PERF #if MEASURE_CLRAPI_CALLS struct WrapICorJitInfo; #endif // This class summarizes the JIT time information over the course of a run: the number of methods compiled, // and the total and maximum timings. (These are instances of the "CompTimeInfo" type described above). // The operation of adding a single method's timing to the summary may be performed concurrently by several // threads, so it is protected by a lock. // This class is intended to be used as a singleton type, with only a single instance. class CompTimeSummaryInfo { // This lock protects the fields of all CompTimeSummaryInfo(s) (of which we expect there to be one). static CritSecObject s_compTimeSummaryLock; int m_numMethods; int m_totMethods; CompTimeInfo m_total; CompTimeInfo m_maximum; int m_numFilteredMethods; CompTimeInfo m_filtered; // This can use what ever data you want to determine if the value to be added // belongs in the filtered section (it's always included in the unfiltered section) bool IncludedInFilteredData(CompTimeInfo& info); public: // This is the unique CompTimeSummaryInfo object for this instance of the runtime. static CompTimeSummaryInfo s_compTimeSummary; CompTimeSummaryInfo() : m_numMethods(0), m_totMethods(0), m_total(0), m_maximum(0), m_numFilteredMethods(0), m_filtered(0) { } // Assumes that "info" is a completed CompTimeInfo for a compilation; adds it to the summary. // This is thread safe. void AddInfo(CompTimeInfo& info, bool includePhases); // Print the summary information to "f". // This is not thread-safe; assumed to be called by only one thread. 
void Print(FILE* f); }; // A JitTimer encapsulates a CompTimeInfo for a single compilation. It also tracks the start of compilation, // and when the current phase started. This is intended to be part of a Compilation object. // class JitTimer { unsigned __int64 m_start; // Start of the compilation. unsigned __int64 m_curPhaseStart; // Start of the current phase. #if MEASURE_CLRAPI_CALLS unsigned __int64 m_CLRcallStart; // Start of the current CLR API call (if any). unsigned __int64 m_CLRcallInvokes; // CLR API invokes under current outer so far unsigned __int64 m_CLRcallCycles; // CLR API cycles under current outer so far. int m_CLRcallAPInum; // The enum/index of the current CLR API call (or -1). static double s_cyclesPerSec; // Cached for speedier measurements #endif #ifdef DEBUG Phases m_lastPhase; // The last phase that was completed (or (Phases)-1 to start). #endif CompTimeInfo m_info; // The CompTimeInfo for this compilation. static CritSecObject s_csvLock; // Lock to protect the time log file. static FILE* s_csvFile; // The time log file handle. void PrintCsvMethodStats(Compiler* comp); private: void* operator new(size_t); void* operator new[](size_t); void operator delete(void*); void operator delete[](void*); public: // Initialized the timer instance JitTimer(unsigned byteCodeSize); static JitTimer* Create(Compiler* comp, unsigned byteCodeSize) { return ::new (comp, CMK_Unknown) JitTimer(byteCodeSize); } static void PrintCsvHeader(); // Ends the current phase (argument is for a redundant check). void EndPhase(Compiler* compiler, Phases phase); #if MEASURE_CLRAPI_CALLS // Start and end a timed CLR API call. void CLRApiCallEnter(unsigned apix); void CLRApiCallLeave(unsigned apix); #endif // MEASURE_CLRAPI_CALLS // Completes the timing of the current method, which is assumed to have "byteCodeBytes" bytes of bytecode, // and adds it to "sum". void Terminate(Compiler* comp, CompTimeSummaryInfo& sum, bool includePhases); // Attempts to query the cycle counter of the current thread. If successful, returns "true" and sets // *cycles to the cycle counter value. Otherwise, returns false and sets the "m_timerFailure" flag of // "m_info" to true. bool GetThreadCycles(unsigned __int64* cycles) { bool res = CycleTimer::GetThreadCyclesS(cycles); if (!res) { m_info.m_timerFailure = true; } return res; } static void Shutdown(); }; #endif // FEATURE_JIT_METHOD_PERF //------------------- Function/Funclet info ------------------------------- enum FuncKind : BYTE { FUNC_ROOT, // The main/root function (always id==0) FUNC_HANDLER, // a funclet associated with an EH handler (finally, fault, catch, filter handler) FUNC_FILTER, // a funclet associated with an EH filter FUNC_COUNT }; class emitLocation; struct FuncInfoDsc { FuncKind funKind; BYTE funFlags; // Currently unused, just here for padding unsigned short funEHIndex; // index, into the ebd table, of innermost EH clause corresponding to this // funclet. It is only valid if funKind field indicates this is a // EH-related funclet: FUNC_HANDLER or FUNC_FILTER #if defined(TARGET_AMD64) // TODO-AMD64-Throughput: make the AMD64 info more like the ARM info to avoid having this large static array. emitLocation* startLoc; emitLocation* endLoc; emitLocation* coldStartLoc; // locations for the cold section, if there is one. emitLocation* coldEndLoc; UNWIND_INFO unwindHeader; // Maximum of 255 UNWIND_CODE 'nodes' and then the unwind header. If there are an odd // number of codes, the VM or Zapper will 4-byte align the whole thing. 
BYTE unwindCodes[offsetof(UNWIND_INFO, UnwindCode) + (0xFF * sizeof(UNWIND_CODE))]; unsigned unwindCodeSlot; #elif defined(TARGET_X86) emitLocation* startLoc; emitLocation* endLoc; emitLocation* coldStartLoc; // locations for the cold section, if there is one. emitLocation* coldEndLoc; #elif defined(TARGET_ARMARCH) UnwindInfo uwi; // Unwind information for this function/funclet's hot section UnwindInfo* uwiCold; // Unwind information for this function/funclet's cold section // Note: we only have a pointer here instead of the actual object, // to save memory in the JIT case (compared to the NGEN case), // where we don't have any cold section. // Note 2: we currently don't support hot/cold splitting in functions // with EH, so uwiCold will be NULL for all funclets. emitLocation* startLoc; emitLocation* endLoc; emitLocation* coldStartLoc; // locations for the cold section, if there is one. emitLocation* coldEndLoc; #endif // TARGET_ARMARCH #if defined(FEATURE_CFI_SUPPORT) jitstd::vector<CFI_CODE>* cfiCodes; #endif // FEATURE_CFI_SUPPORT // Eventually we may want to move rsModifiedRegsMask, lvaOutgoingArgSize, and anything else // that isn't shared between the main function body and funclets. }; struct fgArgTabEntry { GenTreeCall::Use* use; // Points to the argument's GenTreeCall::Use in gtCallArgs or gtCallThisArg. GenTreeCall::Use* lateUse; // Points to the argument's GenTreeCall::Use in gtCallLateArgs, if any. // Get the node that coresponds to this argument entry. // This is the "real" node and not a placeholder or setup node. GenTree* GetNode() const { return lateUse == nullptr ? use->GetNode() : lateUse->GetNode(); } unsigned argNum; // The original argument number, also specifies the required argument evaluation order from the IL private: regNumberSmall regNums[MAX_ARG_REG_COUNT]; // The registers to use when passing this argument, set to REG_STK for // arguments passed on the stack public: unsigned numRegs; // Count of number of registers that this argument uses. // Note that on ARM, if we have a double hfa, this reflects the number // of DOUBLE registers. #if defined(UNIX_AMD64_ABI) // Unix amd64 will split floating point types and integer types in structs // between floating point and general purpose registers. Keep track of that // information so we do not need to recompute it later. unsigned structIntRegs; unsigned structFloatRegs; #endif // UNIX_AMD64_ABI #if defined(DEBUG_ARG_SLOTS) // These fields were used to calculate stack size in stack slots for arguments // but now they are replaced by precise `m_byteOffset/m_byteSize` because of // arm64 apple abi requirements. // A slot is a pointer sized region in the OutArg area. unsigned slotNum; // When an argument is passed in the OutArg area this is the slot number in the OutArg area unsigned numSlots; // Count of number of slots that this argument uses #endif // DEBUG_ARG_SLOTS // Return number of stack slots that this argument is taking. // TODO-Cleanup: this function does not align with arm64 apple model, // delete it. In most cases we just want to know if we it is using stack or not // but in some cases we are checking if it is a multireg arg, like: // `numRegs + GetStackSlotsNumber() > 1` that is harder to replace. // unsigned GetStackSlotsNumber() const { return roundUp(GetStackByteSize(), TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE; } private: unsigned _lateArgInx; // index into gtCallLateArgs list; UINT_MAX if this is not a late arg. 
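    // Worked example for this entry's size accounting (illustrative): if an
    // argument ends up with numRegs == 0 and a byte size of 16 (e.g. a 12-byte
    // struct rounded up to TARGET_POINTER_SIZE by SetByteSize on a 64-bit target),
    // then GetStackByteSize() returns 16 and GetStackSlotsNumber() reports
    // 16 / TARGET_POINTER_SIZE == 2 stack slots.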
public: unsigned tmpNum; // the LclVar number if we had to force evaluation of this arg var_types argType; // The type used to pass this argument. This is generally the original argument type, but when a // struct is passed as a scalar type, this is that type. // Note that if a struct is passed by reference, this will still be the struct type. bool needTmp : 1; // True when we force this argument's evaluation into a temp LclVar bool needPlace : 1; // True when we must replace this argument with a placeholder node bool isTmp : 1; // True when we setup a temp LclVar for this argument due to size issues with the struct bool processed : 1; // True when we have decided the evaluation order for this argument in the gtCallLateArgs bool isBackFilled : 1; // True when the argument fills a register slot skipped due to alignment requirements of // previous arguments. NonStandardArgKind nonStandardArgKind : 4; // The non-standard arg kind. Non-standard args are args that are forced // to be in certain registers or on the stack, regardless of where they // appear in the arg list. bool isStruct : 1; // True if this is a struct arg bool _isVararg : 1; // True if the argument is in a vararg context. bool passedByRef : 1; // True iff the argument is passed by reference. #if FEATURE_ARG_SPLIT bool _isSplit : 1; // True when this argument is split between the registers and OutArg area #endif // FEATURE_ARG_SPLIT #ifdef FEATURE_HFA_FIELDS_PRESENT CorInfoHFAElemType _hfaElemKind : 3; // What kind of an HFA this is (CORINFO_HFA_ELEM_NONE if it is not an HFA). #endif CorInfoHFAElemType GetHfaElemKind() const { #ifdef FEATURE_HFA_FIELDS_PRESENT return _hfaElemKind; #else NOWAY_MSG("GetHfaElemKind"); return CORINFO_HFA_ELEM_NONE; #endif } void SetHfaElemKind(CorInfoHFAElemType elemKind) { #ifdef FEATURE_HFA_FIELDS_PRESENT _hfaElemKind = elemKind; #else NOWAY_MSG("SetHfaElemKind"); #endif } bool isNonStandard() const { return nonStandardArgKind != NonStandardArgKind::None; } // Returns true if the IR node for this non-standarg arg is added by fgInitArgInfo. // In this case, it must be removed by GenTreeCall::ResetArgInfo. 
bool isNonStandardArgAddedLate() const { switch (static_cast<NonStandardArgKind>(nonStandardArgKind)) { case NonStandardArgKind::None: case NonStandardArgKind::PInvokeFrame: case NonStandardArgKind::ShiftLow: case NonStandardArgKind::ShiftHigh: case NonStandardArgKind::FixedRetBuffer: case NonStandardArgKind::ValidateIndirectCallTarget: return false; case NonStandardArgKind::WrapperDelegateCell: case NonStandardArgKind::VirtualStubCell: case NonStandardArgKind::PInvokeCookie: case NonStandardArgKind::PInvokeTarget: case NonStandardArgKind::R2RIndirectionCell: return true; default: unreached(); } } bool isLateArg() const { bool isLate = (_lateArgInx != UINT_MAX); return isLate; } unsigned GetLateArgInx() const { assert(isLateArg()); return _lateArgInx; } void SetLateArgInx(unsigned inx) { _lateArgInx = inx; } regNumber GetRegNum() const { return (regNumber)regNums[0]; } regNumber GetOtherRegNum() const { return (regNumber)regNums[1]; } #if defined(UNIX_AMD64_ABI) SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; #endif void setRegNum(unsigned int i, regNumber regNum) { assert(i < MAX_ARG_REG_COUNT); regNums[i] = (regNumberSmall)regNum; } regNumber GetRegNum(unsigned int i) { assert(i < MAX_ARG_REG_COUNT); return (regNumber)regNums[i]; } bool IsSplit() const { #if FEATURE_ARG_SPLIT return compFeatureArgSplit() && _isSplit; #else // FEATURE_ARG_SPLIT return false; #endif } void SetSplit(bool value) { #if FEATURE_ARG_SPLIT _isSplit = value; #endif } bool IsVararg() const { return compFeatureVarArg() && _isVararg; } void SetIsVararg(bool value) { if (compFeatureVarArg()) { _isVararg = value; } } bool IsHfaArg() const { if (GlobalJitOptions::compFeatureHfa) { return IsHfa(GetHfaElemKind()); } else { return false; } } bool IsHfaRegArg() const { if (GlobalJitOptions::compFeatureHfa) { return IsHfa(GetHfaElemKind()) && isPassedInRegisters(); } else { return false; } } unsigned intRegCount() const { #if defined(UNIX_AMD64_ABI) if (this->isStruct) { return this->structIntRegs; } #endif // defined(UNIX_AMD64_ABI) if (!this->isPassedInFloatRegisters()) { return this->numRegs; } return 0; } unsigned floatRegCount() const { #if defined(UNIX_AMD64_ABI) if (this->isStruct) { return this->structFloatRegs; } #endif // defined(UNIX_AMD64_ABI) if (this->isPassedInFloatRegisters()) { return this->numRegs; } return 0; } // Get the number of bytes that this argument is occupying on the stack, // including padding up to the target pointer size for platforms // where a stack argument can't take less. unsigned GetStackByteSize() const { if (!IsSplit() && numRegs > 0) { return 0; } assert(!IsHfaArg() || !IsSplit()); assert(GetByteSize() > TARGET_POINTER_SIZE * numRegs); const unsigned stackByteSize = GetByteSize() - TARGET_POINTER_SIZE * numRegs; return stackByteSize; } var_types GetHfaType() const { if (GlobalJitOptions::compFeatureHfa) { return HfaTypeFromElemKind(GetHfaElemKind()); } else { return TYP_UNDEF; } } void SetHfaType(var_types type, unsigned hfaSlots) { if (GlobalJitOptions::compFeatureHfa) { if (type != TYP_UNDEF) { // We must already have set the passing mode. assert(numRegs != 0 || GetStackByteSize() != 0); // We originally set numRegs according to the size of the struct, but if the size of the // hfaType is not the same as the pointer size, we need to correct it. // Note that hfaSlots is the number of registers we will use. For ARM, that is twice // the number of "double registers". 
unsigned numHfaRegs = hfaSlots; #ifdef TARGET_ARM if (type == TYP_DOUBLE) { // Must be an even number of registers. assert((numRegs & 1) == 0); numHfaRegs = hfaSlots / 2; } #endif // TARGET_ARM if (!IsHfaArg()) { // We haven't previously set this; do so now. CorInfoHFAElemType elemKind = HfaElemKindFromType(type); SetHfaElemKind(elemKind); // Ensure we've allocated enough bits. assert(GetHfaElemKind() == elemKind); if (isPassedInRegisters()) { numRegs = numHfaRegs; } } else { // We've already set this; ensure that it's consistent. if (isPassedInRegisters()) { assert(numRegs == numHfaRegs); } assert(type == HfaTypeFromElemKind(GetHfaElemKind())); } } } } #ifdef TARGET_ARM void SetIsBackFilled(bool backFilled) { isBackFilled = backFilled; } bool IsBackFilled() const { return isBackFilled; } #else // !TARGET_ARM void SetIsBackFilled(bool backFilled) { } bool IsBackFilled() const { return false; } #endif // !TARGET_ARM bool isPassedInRegisters() const { return !IsSplit() && (numRegs != 0); } bool isPassedInFloatRegisters() const { #ifdef TARGET_X86 return false; #else return isValidFloatArgReg(GetRegNum()); #endif } // Can we replace the struct type of this node with a primitive type for argument passing? bool TryPassAsPrimitive() const { return !IsSplit() && ((numRegs == 1) || (m_byteSize <= TARGET_POINTER_SIZE)); } #if defined(DEBUG_ARG_SLOTS) // Returns the number of "slots" used, where for this purpose a // register counts as a slot. unsigned getSlotCount() const { if (isBackFilled) { assert(isPassedInRegisters()); assert(numRegs == 1); } else if (GetRegNum() == REG_STK) { assert(!isPassedInRegisters()); assert(numRegs == 0); } else { assert(numRegs > 0); } return numSlots + numRegs; } #endif #if defined(DEBUG_ARG_SLOTS) // Returns the size as a multiple of pointer-size. // For targets without HFAs, this is the same as getSlotCount(). unsigned getSize() const { unsigned size = getSlotCount(); if (GlobalJitOptions::compFeatureHfa) { if (IsHfaRegArg()) { #ifdef TARGET_ARM // We counted the number of regs, but if they are DOUBLE hfa regs we have to double the size. if (GetHfaType() == TYP_DOUBLE) { assert(!IsSplit()); size <<= 1; } #elif defined(TARGET_ARM64) // We counted the number of regs, but if they are FLOAT hfa regs we have to halve the size, // or if they are SIMD16 vector hfa regs we have to double the size. if (GetHfaType() == TYP_FLOAT) { // Round up in case of odd HFA count. size = (size + 1) >> 1; } #ifdef FEATURE_SIMD else if (GetHfaType() == TYP_SIMD16) { size <<= 1; } #endif // FEATURE_SIMD #endif // TARGET_ARM64 } } return size; } #endif // DEBUG_ARG_SLOTS private: unsigned m_byteOffset; // byte size that this argument takes including the padding after. // For example, 1-byte arg on x64 with 8-byte alignment // will have `m_byteSize == 8`, the same arg on apple arm64 will have `m_byteSize == 1`. unsigned m_byteSize; unsigned m_byteAlignment; // usually 4 or 8 bytes (slots/registers). public: void SetByteOffset(unsigned byteOffset) { DEBUG_ARG_SLOTS_ASSERT(byteOffset / TARGET_POINTER_SIZE == slotNum); m_byteOffset = byteOffset; } unsigned GetByteOffset() const { DEBUG_ARG_SLOTS_ASSERT(m_byteOffset / TARGET_POINTER_SIZE == slotNum); return m_byteOffset; } void SetByteSize(unsigned byteSize, bool isStruct, bool isFloatHfa) { unsigned roundedByteSize; if (compMacOsArm64Abi()) { // Only struct types need extension or rounding to pointer size, but HFA<float> does not. 
if (isStruct && !isFloatHfa) { roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE); } else { roundedByteSize = byteSize; } } else { roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE); } #if !defined(TARGET_ARM) // Arm32 could have a struct with 8 byte alignment // which rounded size % 8 is not 0. assert(m_byteAlignment != 0); assert(roundedByteSize % m_byteAlignment == 0); #endif // TARGET_ARM #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi() && !isStruct) { assert(roundedByteSize == getSlotCount() * TARGET_POINTER_SIZE); } #endif m_byteSize = roundedByteSize; } unsigned GetByteSize() const { return m_byteSize; } void SetByteAlignment(unsigned byteAlignment) { m_byteAlignment = byteAlignment; } unsigned GetByteAlignment() const { return m_byteAlignment; } // Set the register numbers for a multireg argument. // There's nothing to do on x64/Ux because the structDesc has already been used to set the // register numbers. void SetMultiRegNums() { #if FEATURE_MULTIREG_ARGS && !defined(UNIX_AMD64_ABI) if (numRegs == 1) { return; } regNumber argReg = GetRegNum(0); #ifdef TARGET_ARM unsigned int regSize = (GetHfaType() == TYP_DOUBLE) ? 2 : 1; #else unsigned int regSize = 1; #endif if (numRegs > MAX_ARG_REG_COUNT) NO_WAY("Multireg argument exceeds the maximum length"); for (unsigned int regIndex = 1; regIndex < numRegs; regIndex++) { argReg = (regNumber)(argReg + regSize); setRegNum(regIndex, argReg); } #endif // FEATURE_MULTIREG_ARGS && !defined(UNIX_AMD64_ABI) } #ifdef DEBUG // Check that the value of 'isStruct' is consistent. // A struct arg must be one of the following: // - A node of struct type, // - A GT_FIELD_LIST, or // - A node of a scalar type, passed in a single register or slot // (or two slots in the case of a struct pass on the stack as TYP_DOUBLE). // void checkIsStruct() const { GenTree* node = GetNode(); if (isStruct) { if (!varTypeIsStruct(node) && !node->OperIs(GT_FIELD_LIST)) { // This is the case where we are passing a struct as a primitive type. // On most targets, this is always a single register or slot. // However, on ARM this could be two slots if it is TYP_DOUBLE. bool isPassedAsPrimitiveType = ((numRegs == 1) || ((numRegs == 0) && (GetByteSize() <= TARGET_POINTER_SIZE))); #ifdef TARGET_ARM if (!isPassedAsPrimitiveType) { if (node->TypeGet() == TYP_DOUBLE && numRegs == 0 && (numSlots == 2)) { isPassedAsPrimitiveType = true; } } #endif // TARGET_ARM assert(isPassedAsPrimitiveType); } } else { assert(!varTypeIsStruct(node)); } } void Dump() const; #endif }; //------------------------------------------------------------------------- // // The class fgArgInfo is used to handle the arguments // when morphing a GT_CALL node. // class fgArgInfo { Compiler* compiler; // Back pointer to the compiler instance so that we can allocate memory GenTreeCall* callTree; // Back pointer to the GT_CALL node for this fgArgInfo unsigned argCount; // Updatable arg count value #if defined(DEBUG_ARG_SLOTS) unsigned nextSlotNum; // Updatable slot count value #endif unsigned nextStackByteOffset; unsigned stkLevel; // Stack depth when we make this call (for x86) #if defined(UNIX_X86_ABI) bool alignmentDone; // Updateable flag, set to 'true' after we've done any required alignment. unsigned stkSizeBytes; // Size of stack used by this call, in bytes. Calculated during fgMorphArgs(). unsigned padStkAlign; // Stack alignment in bytes required before arguments are pushed for this call. 
    // Computed dynamically during codegen, based on stkSizeBytes and the current
    // stack level (genStackLevel) when the first stack adjustment is made for
    // this call.
#endif

#if FEATURE_FIXED_OUT_ARGS
    unsigned outArgSize; // Size of the out arg area for the call, will be at least MIN_ARG_AREA_FOR_CALL
#endif

    unsigned        argTableSize; // size of argTable array (equal to the argCount when done with fgMorphArgs)
    bool            hasRegArgs;   // true if we have one or more register arguments
    bool            hasStackArgs; // true if we have one or more stack arguments
    bool            argsComplete; // marker for state
    bool            argsSorted;   // marker for state
    bool            needsTemps;   // one or more arguments must be copied to a temp by EvalArgsToTemps
    fgArgTabEntry** argTable;     // variable sized array of per argument description: (i.e. argTable[argTableSize])

private:
    void AddArg(fgArgTabEntry* curArgTabEntry);

public:
    fgArgInfo(Compiler* comp, GenTreeCall* call, unsigned argCount);
    fgArgInfo(GenTreeCall* newCall, GenTreeCall* oldCall);

    fgArgTabEntry* AddRegArg(unsigned          argNum,
                             GenTree*          node,
                             GenTreeCall::Use* use,
                             regNumber         regNum,
                             unsigned          numRegs,
                             unsigned          byteSize,
                             unsigned          byteAlignment,
                             bool              isStruct,
                             bool              isFloatHfa,
                             bool              isVararg = false);

#ifdef UNIX_AMD64_ABI
    fgArgTabEntry* AddRegArg(unsigned                                                         argNum,
                             GenTree*                                                         node,
                             GenTreeCall::Use*                                                use,
                             regNumber                                                        regNum,
                             unsigned                                                         numRegs,
                             unsigned                                                         byteSize,
                             unsigned                                                         byteAlignment,
                             const bool                                                       isStruct,
                             const bool                                                       isFloatHfa,
                             const bool                                                       isVararg,
                             const regNumber                                                  otherRegNum,
                             const unsigned                                                   structIntRegs,
                             const unsigned                                                   structFloatRegs,
                             const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr = nullptr);
#endif // UNIX_AMD64_ABI

    fgArgTabEntry* AddStkArg(unsigned          argNum,
                             GenTree*          node,
                             GenTreeCall::Use* use,
                             unsigned          numSlots,
                             unsigned          byteSize,
                             unsigned          byteAlignment,
                             bool              isStruct,
                             bool              isFloatHfa,
                             bool              isVararg = false);

    void RemorphReset();
    void UpdateRegArg(fgArgTabEntry* argEntry, GenTree* node, bool reMorphing);
    void UpdateStkArg(fgArgTabEntry* argEntry, GenTree* node, bool reMorphing);

    void SplitArg(unsigned argNum, unsigned numRegs, unsigned numSlots);

    void EvalToTmp(fgArgTabEntry* curArgTabEntry, unsigned tmpNum, GenTree* newNode);

    void ArgsComplete();
    void SortArgs();
    void EvalArgsToTemps();

    unsigned ArgCount() const
    {
        return argCount;
    }
    fgArgTabEntry** ArgTable() const
    {
        return argTable;
    }

#if defined(DEBUG_ARG_SLOTS)
    unsigned GetNextSlotNum() const
    {
        return nextSlotNum;
    }
#endif

    unsigned GetNextSlotByteOffset() const
    {
        return nextStackByteOffset;
    }
    bool HasRegArgs() const
    {
        return hasRegArgs;
    }
    bool NeedsTemps() const
    {
        return needsTemps;
    }
    bool HasStackArgs() const
    {
        return hasStackArgs;
    }
    bool AreArgsComplete() const
    {
        return argsComplete;
    }

#if FEATURE_FIXED_OUT_ARGS
    unsigned GetOutArgSize() const
    {
        return outArgSize;
    }
    void SetOutArgSize(unsigned newVal)
    {
        outArgSize = newVal;
    }
#endif // FEATURE_FIXED_OUT_ARGS

#if defined(UNIX_X86_ABI)
    void ComputeStackAlignment(unsigned curStackLevelInBytes)
    {
        padStkAlign = AlignmentPad(curStackLevelInBytes, STACK_ALIGN);
    }

    unsigned GetStkAlign() const
    {
        return padStkAlign;
    }

    void SetStkSizeBytes(unsigned newStkSizeBytes)
    {
        stkSizeBytes = newStkSizeBytes;
    }

    unsigned GetStkSizeBytes() const
    {
        return stkSizeBytes;
    }

    bool IsStkAlignmentDone() const
    {
        return alignmentDone;
    }

    void SetStkAlignmentDone()
    {
        alignmentDone = true;
    }
#endif // defined(UNIX_X86_ABI)

    // Get the fgArgTabEntry for the arg at position argNum.
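    // A minimal usage sketch (hypothetical caller; assumes fgMorphArgs has already populated the
    // call's arg info, and that GenTreeCall exposes it via a 'fgArgInfo' field):
    //
    //   fgArgInfo*     argInfo = call->fgArgInfo;
    //   fgArgTabEntry* entry   = argInfo->GetArgEntry(argIndex, /* reMorphing */ false);
    //   GenTree*       argNode = entry->GetNode();
    //
    // When 'reMorphing' is true the table may already have been sorted, so the entry is located by a
    // linear search on its argNum rather than by direct indexing (see the implementation below).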
fgArgTabEntry* GetArgEntry(unsigned argNum, bool reMorphing = true) const { fgArgTabEntry* curArgTabEntry = nullptr; if (!reMorphing) { // The arg table has not yet been sorted. curArgTabEntry = argTable[argNum]; assert(curArgTabEntry->argNum == argNum); return curArgTabEntry; } for (unsigned i = 0; i < argCount; i++) { curArgTabEntry = argTable[i]; if (curArgTabEntry->argNum == argNum) { return curArgTabEntry; } } noway_assert(!"GetArgEntry: argNum not found"); return nullptr; } void SetNeedsTemps() { needsTemps = true; } // Get the node for the arg at position argIndex. // Caller must ensure that this index is a valid arg index. GenTree* GetArgNode(unsigned argIndex) const { return GetArgEntry(argIndex)->GetNode(); } void Dump(Compiler* compiler) const; }; #ifdef DEBUG // XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // We have the ability to mark source expressions with "Test Labels." // These drive assertions within the JIT, or internal JIT testing. For example, we could label expressions // that should be CSE defs, and other expressions that should uses of those defs, with a shared label. enum TestLabel // This must be kept identical to System.Runtime.CompilerServices.JitTestLabel.TestLabel. { TL_SsaName, TL_VN, // Defines a "VN equivalence class". (For full VN, including exceptions thrown). TL_VNNorm, // Like above, but uses the non-exceptional value of the expression. TL_CSE_Def, // This must be identified in the JIT as a CSE def TL_CSE_Use, // This must be identified in the JIT as a CSE use TL_LoopHoist, // Expression must (or must not) be hoisted out of the loop. }; struct TestLabelAndNum { TestLabel m_tl; ssize_t m_num; TestLabelAndNum() : m_tl(TestLabel(0)), m_num(0) { } }; typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, TestLabelAndNum> NodeToTestDataMap; // XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #endif // DEBUG //------------------------------------------------------------------------- // LoopFlags: flags for the loop table. // enum LoopFlags : unsigned short { LPFLG_EMPTY = 0, // LPFLG_UNUSED = 0x0001, // LPFLG_UNUSED = 0x0002, LPFLG_ITER = 0x0004, // loop of form: for (i = icon or lclVar; test_condition(); i++) // LPFLG_UNUSED = 0x0008, LPFLG_CONTAINS_CALL = 0x0010, // If executing the loop body *may* execute a call LPFLG_VAR_INIT = 0x0020, // iterator is initialized with a local var (var # found in lpVarInit) LPFLG_CONST_INIT = 0x0040, // iterator is initialized with a constant (found in lpConstInit) LPFLG_SIMD_LIMIT = 0x0080, // iterator is compared with vector element count (found in lpConstLimit) LPFLG_VAR_LIMIT = 0x0100, // iterator is compared with a local var (var # found in lpVarLimit) LPFLG_CONST_LIMIT = 0x0200, // iterator is compared with a constant (found in lpConstLimit) LPFLG_ARRLEN_LIMIT = 0x0400, // iterator is compared with a.len or a[i].len (found in lpArrLenLimit) LPFLG_HAS_PREHEAD = 0x0800, // lpHead is known to be a preHead for this loop LPFLG_REMOVED = 0x1000, // has been removed from the loop table (unrolled or optimized away) LPFLG_DONT_UNROLL = 0x2000, // do not unroll this loop LPFLG_ASGVARS_YES = 0x4000, // "lpAsgVars" has been computed LPFLG_ASGVARS_INC = 0x8000, // "lpAsgVars" is incomplete -- vars beyond those representable in an AllVarSet // type are assigned to. 
}; inline constexpr LoopFlags operator~(LoopFlags a) { return (LoopFlags)(~(unsigned short)a); } inline constexpr LoopFlags operator|(LoopFlags a, LoopFlags b) { return (LoopFlags)((unsigned short)a | (unsigned short)b); } inline constexpr LoopFlags operator&(LoopFlags a, LoopFlags b) { return (LoopFlags)((unsigned short)a & (unsigned short)b); } inline LoopFlags& operator|=(LoopFlags& a, LoopFlags b) { return a = (LoopFlags)((unsigned short)a | (unsigned short)b); } inline LoopFlags& operator&=(LoopFlags& a, LoopFlags b) { return a = (LoopFlags)((unsigned short)a & (unsigned short)b); } // The following holds information about instr offsets in terms of generated code. enum class IPmappingDscKind { Prolog, // The mapping represents the start of a prolog. Epilog, // The mapping represents the start of an epilog. NoMapping, // This does not map to any IL offset. Normal, // The mapping maps to an IL offset. }; struct IPmappingDsc { emitLocation ipmdNativeLoc; // the emitter location of the native code corresponding to the IL offset IPmappingDscKind ipmdKind; // The kind of mapping ILLocation ipmdLoc; // The location for normal mappings bool ipmdIsLabel; // Can this code be a branch label? }; struct PreciseIPMapping { emitLocation nativeLoc; DebugInfo debugInfo; }; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX The big guy. The sections are currently organized as : XX XX XX XX o GenTree and BasicBlock XX XX o LclVarsInfo XX XX o Importer XX XX o FlowGraph XX XX o Optimizer XX XX o RegAlloc XX XX o EEInterface XX XX o TempsInfo XX XX o RegSet XX XX o GCInfo XX XX o Instruction XX XX o ScopeInfo XX XX o PrologScopeInfo XX XX o CodeGenerator XX XX o UnwindInfo XX XX o Compiler XX XX o typeInfo XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ struct HWIntrinsicInfo; class Compiler { friend class emitter; friend class UnwindInfo; friend class UnwindFragmentInfo; friend class UnwindEpilogInfo; friend class JitTimer; friend class LinearScan; friend class fgArgInfo; friend class Rationalizer; friend class Phase; friend class Lowering; friend class CSE_DataFlow; friend class CSE_Heuristic; friend class CodeGenInterface; friend class CodeGen; friend class LclVarDsc; friend class TempDsc; friend class LIR; friend class ObjectAllocator; friend class LocalAddressVisitor; friend struct GenTree; friend class MorphInitBlockHelper; friend class MorphCopyBlockHelper; #ifdef FEATURE_HW_INTRINSICS friend struct HWIntrinsicInfo; #endif // FEATURE_HW_INTRINSICS #ifndef TARGET_64BIT friend class DecomposeLongs; #endif // !TARGET_64BIT /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Misc structs definitions XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: hashBvGlobalData hbvGlobalData; // Used by the hashBv bitvector package. #ifdef DEBUG bool verbose; bool verboseTrees; bool shouldUseVerboseTrees(); bool asciiTrees; // If true, dump trees using only ASCII characters bool shouldDumpASCIITrees(); bool verboseSsa; // If true, produce especially verbose dump output in SSA construction. 
    bool shouldUseVerboseSsa();
    bool treesBeforeAfterMorph; // If true, print trees before/after morphing (paired by an intra-compilation id:
    int  morphNum;              // This counts the trees that have been morphed, allowing us to label each uniquely.
    bool doExtraSuperPmiQueries;
    void makeExtraStructQueries(CORINFO_CLASS_HANDLE structHandle, int level); // Make queries recursively 'level' deep.

    const char* VarNameToStr(VarName name)
    {
        return name;
    }

    DWORD expensiveDebugCheckLevel;
#endif

#if FEATURE_MULTIREG_RET
    GenTree* impAssignMultiRegTypeToVar(GenTree*             op,
                                        CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv));
#endif // FEATURE_MULTIREG_RET

#ifdef TARGET_X86
    bool isTrivialPointerSizedStruct(CORINFO_CLASS_HANDLE clsHnd) const;
#endif // TARGET_X86

    //-------------------------------------------------------------------------
    // Functions to handle homogeneous floating-point aggregates (HFAs) in ARM/ARM64.
    // HFAs are one to four element structs where each element is the same
    // type, either all float or all double. We handle HVAs (one to four elements of
    // vector types) uniformly with HFAs. HFAs are treated specially
    // in the ARM/ARM64 Procedure Call Standards, specifically, they are passed in
    // floating-point registers instead of the general purpose registers.
    //

    bool IsHfa(CORINFO_CLASS_HANDLE hClass);
    bool IsHfa(GenTree* tree);

    var_types GetHfaType(GenTree* tree);
    unsigned  GetHfaCount(GenTree* tree);

    var_types GetHfaType(CORINFO_CLASS_HANDLE hClass);
    unsigned  GetHfaCount(CORINFO_CLASS_HANDLE hClass);

    bool IsMultiRegReturnedType(CORINFO_CLASS_HANDLE hClass, CorInfoCallConvExtension callConv);

    //-------------------------------------------------------------------------
    // The following is used for validating the format of the EH table
    //

    struct EHNodeDsc;
    typedef struct EHNodeDsc* pEHNodeDsc;

    EHNodeDsc* ehnTree; // root of the tree comprising the EHnodes.
    EHNodeDsc* ehnNext; // next available EHNodeDsc in the preallocated block, used while building the tree.

    struct EHNodeDsc
    {
        enum EHBlockType
        {
            TryNode,
            FilterNode,
            HandlerNode,
            FinallyNode,
            FaultNode
        };

        EHBlockType ehnBlockType;   // kind of EH block
        IL_OFFSET   ehnStartOffset; // IL offset of start of the EH block
        IL_OFFSET   ehnEndOffset;   // IL offset past end of the EH block. (TODO: looks like verInsertEhNode() sets this to
                                    // the last IL offset, not "one past the last one", i.e., the range Start to End is
                                    // inclusive).
pEHNodeDsc ehnNext; // next (non-nested) block in sequential order pEHNodeDsc ehnChild; // leftmost nested block union { pEHNodeDsc ehnTryNode; // for filters and handlers, the corresponding try node pEHNodeDsc ehnHandlerNode; // for a try node, the corresponding handler node }; pEHNodeDsc ehnFilterNode; // if this is a try node and has a filter, otherwise 0 pEHNodeDsc ehnEquivalent; // if blockType=tryNode, start offset and end offset is same, void ehnSetTryNodeType() { ehnBlockType = TryNode; } void ehnSetFilterNodeType() { ehnBlockType = FilterNode; } void ehnSetHandlerNodeType() { ehnBlockType = HandlerNode; } void ehnSetFinallyNodeType() { ehnBlockType = FinallyNode; } void ehnSetFaultNodeType() { ehnBlockType = FaultNode; } bool ehnIsTryBlock() { return ehnBlockType == TryNode; } bool ehnIsFilterBlock() { return ehnBlockType == FilterNode; } bool ehnIsHandlerBlock() { return ehnBlockType == HandlerNode; } bool ehnIsFinallyBlock() { return ehnBlockType == FinallyNode; } bool ehnIsFaultBlock() { return ehnBlockType == FaultNode; } // returns true if there is any overlap between the two nodes static bool ehnIsOverlap(pEHNodeDsc node1, pEHNodeDsc node2) { if (node1->ehnStartOffset < node2->ehnStartOffset) { return (node1->ehnEndOffset >= node2->ehnStartOffset); } else { return (node1->ehnStartOffset <= node2->ehnEndOffset); } } // fails with BADCODE if inner is not completely nested inside outer static bool ehnIsNested(pEHNodeDsc inner, pEHNodeDsc outer) { return ((inner->ehnStartOffset >= outer->ehnStartOffset) && (inner->ehnEndOffset <= outer->ehnEndOffset)); } }; //------------------------------------------------------------------------- // Exception handling functions // #if !defined(FEATURE_EH_FUNCLETS) bool ehNeedsShadowSPslots() { return (info.compXcptnsCount || opts.compDbgEnC); } // 0 for methods with no EH // 1 for methods with non-nested EH, or where only the try blocks are nested // 2 for a method with a catch within a catch // etc. unsigned ehMaxHndNestingCount; #endif // !FEATURE_EH_FUNCLETS static bool jitIsBetween(unsigned value, unsigned start, unsigned end); static bool jitIsBetweenInclusive(unsigned value, unsigned start, unsigned end); bool bbInCatchHandlerILRange(BasicBlock* blk); bool bbInFilterILRange(BasicBlock* blk); bool bbInTryRegions(unsigned regionIndex, BasicBlock* blk); bool bbInExnFlowRegions(unsigned regionIndex, BasicBlock* blk); bool bbInHandlerRegions(unsigned regionIndex, BasicBlock* blk); bool bbInCatchHandlerRegions(BasicBlock* tryBlk, BasicBlock* hndBlk); unsigned short bbFindInnermostCommonTryRegion(BasicBlock* bbOne, BasicBlock* bbTwo); unsigned short bbFindInnermostTryRegionContainingHandlerRegion(unsigned handlerIndex); unsigned short bbFindInnermostHandlerRegionContainingTryRegion(unsigned tryIndex); // Returns true if "block" is the start of a try region. bool bbIsTryBeg(BasicBlock* block); // Returns true if "block" is the start of a handler or filter region. bool bbIsHandlerBeg(BasicBlock* block); // Returns true iff "block" is where control flows if an exception is raised in the // try region, and sets "*regionIndex" to the index of the try for the handler. // Differs from "IsHandlerBeg" in the case of filters, where this is true for the first // block of the filter, but not for the filter's handler. bool bbIsExFlowBlock(BasicBlock* block, unsigned* regionIndex); bool ehHasCallableHandlers(); // Return the EH descriptor for the given region index. EHblkDsc* ehGetDsc(unsigned regionIndex); // Return the EH index given a region descriptor. 
unsigned ehGetIndex(EHblkDsc* ehDsc); // Return the EH descriptor index of the enclosing try, for the given region index. unsigned ehGetEnclosingTryIndex(unsigned regionIndex); // Return the EH descriptor index of the enclosing handler, for the given region index. unsigned ehGetEnclosingHndIndex(unsigned regionIndex); // Return the EH descriptor for the most nested 'try' region this BasicBlock is a member of (or nullptr if this // block is not in a 'try' region). EHblkDsc* ehGetBlockTryDsc(BasicBlock* block); // Return the EH descriptor for the most nested filter or handler region this BasicBlock is a member of (or nullptr // if this block is not in a filter or handler region). EHblkDsc* ehGetBlockHndDsc(BasicBlock* block); // Return the EH descriptor for the most nested region that may handle exceptions raised in this BasicBlock (or // nullptr if this block's exceptions propagate to caller). EHblkDsc* ehGetBlockExnFlowDsc(BasicBlock* block); EHblkDsc* ehIsBlockTryLast(BasicBlock* block); EHblkDsc* ehIsBlockHndLast(BasicBlock* block); bool ehIsBlockEHLast(BasicBlock* block); bool ehBlockHasExnFlowDsc(BasicBlock* block); // Return the region index of the most nested EH region this block is in. unsigned ehGetMostNestedRegionIndex(BasicBlock* block, bool* inTryRegion); // Find the true enclosing try index, ignoring 'mutual protect' try. Uses IL ranges to check. unsigned ehTrueEnclosingTryIndexIL(unsigned regionIndex); // Return the index of the most nested enclosing region for a particular EH region. Returns NO_ENCLOSING_INDEX // if there is no enclosing region. If the returned index is not NO_ENCLOSING_INDEX, then '*inTryRegion' // is set to 'true' if the enclosing region is a 'try', or 'false' if the enclosing region is a handler. // (It can never be a filter.) unsigned ehGetEnclosingRegionIndex(unsigned regionIndex, bool* inTryRegion); // A block has been deleted. Update the EH table appropriately. void ehUpdateForDeletedBlock(BasicBlock* block); // Determine whether a block can be deleted while preserving the EH normalization rules. bool ehCanDeleteEmptyBlock(BasicBlock* block); // Update the 'last' pointers in the EH table to reflect new or deleted blocks in an EH region. void ehUpdateLastBlocks(BasicBlock* oldLast, BasicBlock* newLast); // For a finally handler, find the region index that the BBJ_CALLFINALLY lives in that calls the handler, // or NO_ENCLOSING_INDEX if the BBJ_CALLFINALLY lives in the main function body. Normally, the index // is the same index as the handler (and the BBJ_CALLFINALLY lives in the 'try' region), but for AMD64 the // BBJ_CALLFINALLY lives in the enclosing try or handler region, whichever is more nested, or the main function // body. If the returned index is not NO_ENCLOSING_INDEX, then '*inTryRegion' is set to 'true' if the // BBJ_CALLFINALLY lives in the returned index's 'try' region, or 'false' if lives in the handler region. (It never // lives in a filter.) unsigned ehGetCallFinallyRegionIndex(unsigned finallyIndex, bool* inTryRegion); // Find the range of basic blocks in which all BBJ_CALLFINALLY will be found that target the 'finallyIndex' region's // handler. Set begBlk to the first block, and endBlk to the block after the last block of the range // (nullptr if the last block is the last block in the program). // Precondition: 'finallyIndex' is the EH region of a try/finally clause. 
    void ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** begBlk, BasicBlock** endBlk);

#ifdef DEBUG
    // Given a BBJ_CALLFINALLY block and the EH region index of the finally it is calling, return
    // 'true' if the BBJ_CALLFINALLY is in the correct EH region.
    bool ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsigned finallyIndex);
#endif // DEBUG

#if defined(FEATURE_EH_FUNCLETS)
    // Do we need a PSPSym in the main function? For codegen purposes, we only need one
    // if there is a filter that protects a region with a nested EH clause (such as a
    // try/catch nested in the 'try' body of a try/filter/filter-handler). See
    // genFuncletProlog() for more details. However, the VM seems to use it for more
    // purposes, maybe including debugging. Until we are sure otherwise, always create
    // a PSPSym for functions with any EH.
    bool ehNeedsPSPSym() const
    {
#ifdef TARGET_X86
        return false;
#else  // TARGET_X86
        return compHndBBtabCount > 0;
#endif // TARGET_X86
    }

    bool     ehAnyFunclets();  // Are there any funclets in this function?
    unsigned ehFuncletCount(); // Return the count of funclets in the function

    unsigned bbThrowIndex(BasicBlock* blk); // Get the index to use as the cache key for sharing throw blocks

#else // !FEATURE_EH_FUNCLETS

    bool ehAnyFunclets()
    {
        return false;
    }
    unsigned ehFuncletCount()
    {
        return 0;
    }

    unsigned bbThrowIndex(BasicBlock* blk)
    {
        return blk->bbTryIndex;
    } // Get the index to use as the cache key for sharing throw blocks

#endif // !FEATURE_EH_FUNCLETS

    // Returns a flowList representing the "EH predecessors" of "blk". These are the normal predecessors of
    // "blk", plus one special case: if "blk" is the first block of a handler, considers the predecessor(s) of the
    // first block of the corresponding try region to be "EH predecessors". (If there is a single such predecessor,
    // for example, we want to consider it the immediate dominator of the catch clause start block, so it's
    // convenient to also consider it a predecessor.)
    flowList* BlockPredsWithEH(BasicBlock* blk);

    // This table is useful for memoization of the method above.
    typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, flowList*> BlockToFlowListMap;
    BlockToFlowListMap* m_blockToEHPreds;
    BlockToFlowListMap* GetBlockToEHPreds()
    {
        if (m_blockToEHPreds == nullptr)
        {
            m_blockToEHPreds = new (getAllocator()) BlockToFlowListMap(getAllocator());
        }
        return m_blockToEHPreds;
    }

    void* ehEmitCookie(BasicBlock* block);
    UNATIVE_OFFSET ehCodeOffset(BasicBlock* block);

    EHblkDsc* ehInitHndRange(BasicBlock* src, IL_OFFSET* hndBeg, IL_OFFSET* hndEnd, bool* inFilter);

    EHblkDsc* ehInitTryRange(BasicBlock* src, IL_OFFSET* tryBeg, IL_OFFSET* tryEnd);

    EHblkDsc* ehInitHndBlockRange(BasicBlock* blk, BasicBlock** hndBeg, BasicBlock** hndLast, bool* inFilter);

    EHblkDsc* ehInitTryBlockRange(BasicBlock* blk, BasicBlock** tryBeg, BasicBlock** tryLast);

    void fgSetTryBeg(EHblkDsc* handlerTab, BasicBlock* newTryBeg);

    void fgSetTryEnd(EHblkDsc* handlerTab, BasicBlock* newTryLast);
    void fgSetHndEnd(EHblkDsc* handlerTab, BasicBlock* newHndLast);

    void fgSkipRmvdBlocks(EHblkDsc* handlerTab);

    void fgAllocEHTable();

    void fgRemoveEHTableEntry(unsigned XTnum);

#if defined(FEATURE_EH_FUNCLETS)

    EHblkDsc* fgAddEHTableEntry(unsigned XTnum);

#endif // FEATURE_EH_FUNCLETS

#if !FEATURE_EH
    void fgRemoveEH();
#endif // !FEATURE_EH

    void fgSortEHTable();

    // Causes the EH table to obey some well-formedness conditions, by inserting
    // empty BB's when necessary:
    // * No block is both the first block of a handler and the first block of a try.
// * No block is the first block of multiple 'try' regions. // * No block is the last block of multiple EH regions. void fgNormalizeEH(); bool fgNormalizeEHCase1(); bool fgNormalizeEHCase2(); bool fgNormalizeEHCase3(); void fgCheckForLoopsInHandlers(); #ifdef DEBUG void dispIncomingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause); void dispOutgoingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause); void fgVerifyHandlerTab(); void fgDispHandlerTab(); #endif // DEBUG bool fgNeedToSortEHTable; void verInitEHTree(unsigned numEHClauses); void verInsertEhNode(CORINFO_EH_CLAUSE* clause, EHblkDsc* handlerTab); void verInsertEhNodeInTree(EHNodeDsc** ppRoot, EHNodeDsc* node); void verInsertEhNodeParent(EHNodeDsc** ppRoot, EHNodeDsc* node); void verCheckNestingLevel(EHNodeDsc* initRoot); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX GenTree and BasicBlock XX XX XX XX Functions to allocate and display the GenTrees and BasicBlocks XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ // Functions to create nodes Statement* gtNewStmt(GenTree* expr = nullptr); Statement* gtNewStmt(GenTree* expr, const DebugInfo& di); // For unary opers. GenTree* gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, bool doSimplifications = TRUE); // For binary opers. GenTree* gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2); GenTreeColon* gtNewColonNode(var_types type, GenTree* elseNode, GenTree* thenNode); GenTreeQmark* gtNewQmarkNode(var_types type, GenTree* cond, GenTreeColon* colon); GenTree* gtNewLargeOperNode(genTreeOps oper, var_types type = TYP_I_IMPL, GenTree* op1 = nullptr, GenTree* op2 = nullptr); GenTreeIntCon* gtNewIconNode(ssize_t value, var_types type = TYP_INT); GenTreeIntCon* gtNewIconNode(unsigned fieldOffset, FieldSeqNode* fieldSeq); GenTreeIntCon* gtNewNull(); GenTreeIntCon* gtNewTrue(); GenTreeIntCon* gtNewFalse(); GenTree* gtNewPhysRegNode(regNumber reg, var_types type); GenTree* gtNewJmpTableNode(); GenTree* gtNewIndOfIconHandleNode(var_types indType, size_t value, GenTreeFlags iconFlags, bool isInvariant); GenTree* gtNewIconHandleNode(size_t value, GenTreeFlags flags, FieldSeqNode* fields = nullptr); GenTreeFlags gtTokenToIconFlags(unsigned token); GenTree* gtNewIconEmbHndNode(void* value, void* pValue, GenTreeFlags flags, void* compileTimeHandle); GenTree* gtNewIconEmbScpHndNode(CORINFO_MODULE_HANDLE scpHnd); GenTree* gtNewIconEmbClsHndNode(CORINFO_CLASS_HANDLE clsHnd); GenTree* gtNewIconEmbMethHndNode(CORINFO_METHOD_HANDLE methHnd); GenTree* gtNewIconEmbFldHndNode(CORINFO_FIELD_HANDLE fldHnd); GenTree* gtNewStringLiteralNode(InfoAccessType iat, void* pValue); GenTreeIntCon* gtNewStringLiteralLength(GenTreeStrCon* node); GenTree* gtNewLconNode(__int64 value); GenTree* gtNewDconNode(double value, var_types type = TYP_DOUBLE); GenTree* gtNewSconNode(int CPX, CORINFO_MODULE_HANDLE scpHandle); GenTree* gtNewZeroConNode(var_types type); GenTree* gtNewOneConNode(var_types type); GenTreeLclVar* gtNewStoreLclVar(unsigned dstLclNum, GenTree* src); #ifdef FEATURE_SIMD GenTree* gtNewSIMDVectorZero(var_types simdType, CorInfoType simdBaseJitType, unsigned simdSize); #endif GenTree* gtNewBlkOpNode(GenTree* dst, GenTree* srcOrFillVal, bool isVolatile, bool isCopyBlock); GenTree* gtNewPutArgReg(var_types type, GenTree* arg, 
regNumber argReg); GenTree* gtNewBitCastNode(var_types type, GenTree* arg); protected: void gtBlockOpInit(GenTree* result, GenTree* dst, GenTree* srcOrFillVal, bool isVolatile); public: GenTreeObj* gtNewObjNode(CORINFO_CLASS_HANDLE structHnd, GenTree* addr); void gtSetObjGcInfo(GenTreeObj* objNode); GenTree* gtNewStructVal(CORINFO_CLASS_HANDLE structHnd, GenTree* addr); GenTree* gtNewBlockVal(GenTree* addr, unsigned size); GenTree* gtNewCpObjNode(GenTree* dst, GenTree* src, CORINFO_CLASS_HANDLE structHnd, bool isVolatile); GenTreeCall::Use* gtNewCallArgs(GenTree* node); GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2); GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3); GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3, GenTree* node4); GenTreeCall::Use* gtPrependNewCallArg(GenTree* node, GenTreeCall::Use* args); GenTreeCall::Use* gtInsertNewCallArgAfter(GenTree* node, GenTreeCall::Use* after); GenTreeCall* gtNewCallNode(gtCallTypes callType, CORINFO_METHOD_HANDLE handle, var_types type, GenTreeCall::Use* args, const DebugInfo& di = DebugInfo()); GenTreeCall* gtNewIndCallNode(GenTree* addr, var_types type, GenTreeCall::Use* args, const DebugInfo& di = DebugInfo()); GenTreeCall* gtNewHelperCallNode(unsigned helper, var_types type, GenTreeCall::Use* args = nullptr); GenTreeCall* gtNewRuntimeLookupHelperCallNode(CORINFO_RUNTIME_LOOKUP* pRuntimeLookup, GenTree* ctxTree, void* compileTimeHandle); GenTreeLclVar* gtNewLclvNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs = BAD_IL_OFFSET)); GenTreeLclVar* gtNewLclLNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs = BAD_IL_OFFSET)); GenTreeLclVar* gtNewLclVarAddrNode(unsigned lclNum, var_types type = TYP_I_IMPL); GenTreeLclFld* gtNewLclFldAddrNode(unsigned lclNum, unsigned lclOffs, FieldSeqNode* fieldSeq, var_types type = TYP_I_IMPL); #ifdef FEATURE_SIMD GenTreeSIMD* gtNewSIMDNode( var_types type, GenTree* op1, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize); GenTreeSIMD* gtNewSIMDNode(var_types type, GenTree* op1, GenTree* op2, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize); void SetOpLclRelatedToSIMDIntrinsic(GenTree* op); #endif #ifdef FEATURE_HW_INTRINSICS GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, GenTree* op4, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree** operands, size_t operandCount, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* 
gtNewSimdHWIntrinsicNode(var_types type, IntrinsicNodeBuilder&& nodeBuilder, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode( var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, op1, op2, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTree* gtNewSimdAbsNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdBinOpNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCeilNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCmpOpNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCmpOpAllNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCmpOpAnyNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCndSelNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCreateBroadcastNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdDotProdNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdFloorNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdGetElementNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdMaxNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdMinNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdNarrowNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdSqrtNode( 
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdSumNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdUnOpNode(genTreeOps op, var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdWidenLowerNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdWidenUpperNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdWithElementNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdZeroNode(var_types type, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode( var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID); CORINFO_CLASS_HANDLE gtGetStructHandleForHWSIMD(var_types simdType, CorInfoType simdBaseJitType); CorInfoType getBaseJitTypeFromArgIfNeeded(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType); #endif // FEATURE_HW_INTRINSICS GenTree* gtNewMustThrowException(unsigned helper, var_types type, CORINFO_CLASS_HANDLE clsHnd); GenTreeLclFld* gtNewLclFldNode(unsigned lnum, var_types type, unsigned offset); GenTree* gtNewInlineCandidateReturnExpr(GenTree* inlineCandidate, var_types type, BasicBlockFlags bbFlags); GenTreeField* gtNewFieldRef(var_types type, CORINFO_FIELD_HANDLE fldHnd, GenTree* obj = nullptr, DWORD offset = 0); GenTree* gtNewIndexRef(var_types typ, GenTree* arrayOp, GenTree* indexOp); GenTreeArrLen* gtNewArrLen(var_types typ, GenTree* arrayOp, int lenOffset, BasicBlock* block); GenTreeIndir* gtNewIndir(var_types typ, GenTree* addr); GenTree* gtNewNullCheck(GenTree* addr, BasicBlock* basicBlock); var_types gtTypeForNullCheck(GenTree* tree); void gtChangeOperToNullCheck(GenTree* tree, BasicBlock* block); static fgArgTabEntry* gtArgEntryByArgNum(GenTreeCall* call, unsigned argNum); static fgArgTabEntry* gtArgEntryByNode(GenTreeCall* call, GenTree* node); fgArgTabEntry* gtArgEntryByLateArgIndex(GenTreeCall* call, unsigned lateArgInx); static GenTree* gtArgNodeByLateArgInx(GenTreeCall* call, unsigned lateArgInx); GenTreeOp* gtNewAssignNode(GenTree* dst, GenTree* src); GenTree* gtNewTempAssign(unsigned tmp, GenTree* val, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); GenTree* gtNewRefCOMfield(GenTree* objPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS access, CORINFO_FIELD_INFO* pFieldInfo, var_types lclTyp, CORINFO_CLASS_HANDLE structType, GenTree* assg); GenTree* gtNewNothingNode(); GenTree* gtNewArgPlaceHolderNode(var_types type, CORINFO_CLASS_HANDLE clsHnd); GenTree* gtUnusedValNode(GenTree* expr); GenTree* gtNewKeepAliveNode(GenTree* op); GenTreeCast* gtNewCastNode(var_types typ, GenTree* op1, bool fromUnsigned, var_types castType); GenTreeCast* gtNewCastNodeL(var_types typ, 
                                GenTree*  op1,
                                bool      fromUnsigned,
                                var_types castType);

    GenTreeAllocObj* gtNewAllocObjNode(
        unsigned int helper, bool helperHasSideEffects, CORINFO_CLASS_HANDLE clsHnd, var_types type, GenTree* op1);

    GenTreeAllocObj* gtNewAllocObjNode(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool useParent);

    GenTree* gtNewRuntimeLookup(CORINFO_GENERIC_HANDLE hnd, CorInfoGenericHandleType hndTyp, GenTree* lookupTree);

    GenTreeIndir* gtNewMethodTableLookup(GenTree* obj);

    //------------------------------------------------------------------------
    // Other GenTree functions

    GenTree* gtClone(GenTree* tree, bool complexOK = false);

    // If `tree` is a lclVar with lclNum `varNum`, return an IntCns with value `varVal`; otherwise,
    // create a copy of `tree`, adding specified flags, replacing uses of lclVar `deepVarNum` with
    // IntCnses with value `deepVarVal`.
    GenTree* gtCloneExpr(
        GenTree* tree, GenTreeFlags addFlags, unsigned varNum, int varVal, unsigned deepVarNum, int deepVarVal);

    // Create a copy of `tree`, optionally adding specified flags, and optionally mapping uses of local
    // `varNum` to int constants with value `varVal`.
    GenTree* gtCloneExpr(GenTree*     tree,
                         GenTreeFlags addFlags = GTF_EMPTY,
                         unsigned     varNum   = BAD_VAR_NUM,
                         int          varVal   = 0)
    {
        return gtCloneExpr(tree, addFlags, varNum, varVal, varNum, varVal);
    }

    Statement* gtCloneStmt(Statement* stmt)
    {
        GenTree* exprClone = gtCloneExpr(stmt->GetRootNode());
        return gtNewStmt(exprClone, stmt->GetDebugInfo());
    }

    // Internal helper for cloning a call
    GenTreeCall* gtCloneExprCallHelper(GenTreeCall* call,
                                       GenTreeFlags addFlags   = GTF_EMPTY,
                                       unsigned     deepVarNum = BAD_VAR_NUM,
                                       int          deepVarVal = 0);

    // Create copy of an inline or guarded devirtualization candidate tree.
    GenTreeCall* gtCloneCandidateCall(GenTreeCall* call);

    void gtUpdateSideEffects(Statement* stmt, GenTree* tree);

    void gtUpdateTreeAncestorsSideEffects(GenTree* tree);

    void gtUpdateStmtSideEffects(Statement* stmt);

    void gtUpdateNodeSideEffects(GenTree* tree);

    void gtUpdateNodeOperSideEffects(GenTree* tree);

    void gtUpdateNodeOperSideEffectsPost(GenTree* tree);

    // Returns "true" iff the complexity (not formally defined, but first interpretation
    // is # of nodes in subtree) of "tree" is greater than "limit".
    // (This is somewhat redundant with the "GetCostEx()/GetCostSz()" fields, but can be used
    // before they have been set.)
    bool gtComplexityExceeds(GenTree** tree, unsigned limit);

    GenTree* gtReverseCond(GenTree* tree);

    static bool gtHasRef(GenTree* tree, ssize_t lclNum);

    bool gtHasLocalsWithAddrOp(GenTree* tree);

    unsigned gtSetCallArgsOrder(const GenTreeCall::UseList& args, bool lateArgs, int* callCostEx, int* callCostSz);
    unsigned gtSetMultiOpOrder(GenTreeMultiOp* multiOp);

    void gtWalkOp(GenTree** op1, GenTree** op2, GenTree* base, bool constOnly);

#ifdef DEBUG
    unsigned gtHashValue(GenTree* tree);

    GenTree* gtWalkOpEffectiveVal(GenTree* op);
#endif

    void gtPrepareCost(GenTree* tree);
    bool gtIsLikelyRegVar(GenTree* tree);

    // Returns true iff the secondNode can be swapped with firstNode.
    bool gtCanSwapOrder(GenTree* firstNode, GenTree* secondNode);

    // Given an address expression, compute its costs and addressing mode opportunities,
    // and mark addressing mode candidates as GTF_DONT_CSE.
    // TODO-Throughput - Consider actually instantiating these early, to avoid
    // having to re-run the algorithm that looks for them (might also improve CQ).
bool gtMarkAddrMode(GenTree* addr, int* costEx, int* costSz, var_types type); unsigned gtSetEvalOrder(GenTree* tree); void gtSetStmtInfo(Statement* stmt); // Returns "true" iff "node" has any of the side effects in "flags". bool gtNodeHasSideEffects(GenTree* node, GenTreeFlags flags); // Returns "true" iff "tree" or its (transitive) children have any of the side effects in "flags". bool gtTreeHasSideEffects(GenTree* tree, GenTreeFlags flags); // Appends 'expr' in front of 'list' // 'list' will typically start off as 'nullptr' // when 'list' is non-null a GT_COMMA node is used to insert 'expr' GenTree* gtBuildCommaList(GenTree* list, GenTree* expr); void gtExtractSideEffList(GenTree* expr, GenTree** pList, GenTreeFlags GenTreeFlags = GTF_SIDE_EFFECT, bool ignoreRoot = false); GenTree* gtGetThisArg(GenTreeCall* call); // Static fields of struct types (and sometimes the types that those are reduced to) are represented by having the // static field contain an object pointer to the boxed struct. This simplifies the GC implementation...but // complicates the JIT somewhat. This predicate returns "true" iff a node with type "fieldNodeType", representing // the given "fldHnd", is such an object pointer. bool gtIsStaticFieldPtrToBoxedStruct(var_types fieldNodeType, CORINFO_FIELD_HANDLE fldHnd); // Return true if call is a recursive call; return false otherwise. // Note when inlining, this looks for calls back to the root method. bool gtIsRecursiveCall(GenTreeCall* call) { return gtIsRecursiveCall(call->gtCallMethHnd); } bool gtIsRecursiveCall(CORINFO_METHOD_HANDLE callMethodHandle) { return (callMethodHandle == impInlineRoot()->info.compMethodHnd); } //------------------------------------------------------------------------- GenTree* gtFoldExpr(GenTree* tree); GenTree* gtFoldExprConst(GenTree* tree); GenTree* gtFoldExprSpecial(GenTree* tree); GenTree* gtFoldBoxNullable(GenTree* tree); GenTree* gtFoldExprCompare(GenTree* tree); GenTree* gtCreateHandleCompare(genTreeOps oper, GenTree* op1, GenTree* op2, CorInfoInlineTypeCheck typeCheckInliningResult); GenTree* gtFoldExprCall(GenTreeCall* call); GenTree* gtFoldTypeCompare(GenTree* tree); GenTree* gtFoldTypeEqualityCall(bool isEq, GenTree* op1, GenTree* op2); // Options to control behavior of gtTryRemoveBoxUpstreamEffects enum BoxRemovalOptions { BR_REMOVE_AND_NARROW, // remove effects, minimize remaining work, return possibly narrowed source tree BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE, // remove effects and minimize remaining work, return type handle tree BR_REMOVE_BUT_NOT_NARROW, // remove effects, return original source tree BR_DONT_REMOVE, // check if removal is possible, return copy source tree BR_DONT_REMOVE_WANT_TYPE_HANDLE, // check if removal is possible, return type handle tree BR_MAKE_LOCAL_COPY // revise box to copy to temp local and return local's address }; GenTree* gtTryRemoveBoxUpstreamEffects(GenTree* tree, BoxRemovalOptions options = BR_REMOVE_AND_NARROW); GenTree* gtOptimizeEnumHasFlag(GenTree* thisOp, GenTree* flagOp); //------------------------------------------------------------------------- // Get the handle, if any. CORINFO_CLASS_HANDLE gtGetStructHandleIfPresent(GenTree* tree); // Get the handle, and assert if not found. CORINFO_CLASS_HANDLE gtGetStructHandle(GenTree* tree); // Get the handle for a ref type. 
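    // A hypothetical usage sketch (variable names assumed): query the class handle for a ref-typed
    // tree together with exactness and non-nullness information.
    //
    //   bool                 isExact   = false;
    //   bool                 isNonNull = false;
    //   CORINFO_CLASS_HANDLE clsHnd    = gtGetClassHandle(tree, &isExact, &isNonNull);
    //   if ((clsHnd != NO_CLASS_HANDLE) && isExact)
    //   {
    //       // The exact type is known here, so type-based optimizations may be possible.
    //   }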
    CORINFO_CLASS_HANDLE gtGetClassHandle(GenTree* tree, bool* pIsExact, bool* pIsNonNull);
    // Get the class handle for a helper call
    CORINFO_CLASS_HANDLE gtGetHelperCallClassHandle(GenTreeCall* call, bool* pIsExact, bool* pIsNonNull);
    // Get the element handle for an array of ref type.
    CORINFO_CLASS_HANDLE gtGetArrayElementClassHandle(GenTree* array);
    // Get a class handle from a helper call argument
    CORINFO_CLASS_HANDLE gtGetHelperArgClassHandle(GenTree* array);
    // Get the class handle for a field
    CORINFO_CLASS_HANDLE gtGetFieldClassHandle(CORINFO_FIELD_HANDLE fieldHnd, bool* pIsExact, bool* pIsNonNull);
    // Check if this tree is a gc static base helper call
    bool gtIsStaticGCBaseHelperCall(GenTree* tree);

    //-------------------------------------------------------------------------
    // Functions to display the trees

#ifdef DEBUG
    void gtDispNode(GenTree* tree, IndentStack* indentStack, _In_z_ const char* msg, bool isLIR);

    void gtDispConst(GenTree* tree);
    void gtDispLeaf(GenTree* tree, IndentStack* indentStack);
    void gtDispNodeName(GenTree* tree);
#if FEATURE_MULTIREG_RET
    unsigned gtDispMultiRegCount(GenTree* tree);
#endif
    void gtDispRegVal(GenTree* tree);
    void gtDispZeroFieldSeq(GenTree* tree);
    void gtDispVN(GenTree* tree);
    void gtDispCommonEndLine(GenTree* tree);

    enum IndentInfo
    {
        IINone,
        IIArc,
        IIArcTop,
        IIArcBottom,
        IIEmbedded,
        IIError,
        IndentInfoCount
    };
    void gtDispChild(GenTree*             child,
                     IndentStack*         indentStack,
                     IndentInfo           arcType,
                     _In_opt_ const char* msg     = nullptr,
                     bool                 topOnly = false);
    void gtDispTree(GenTree*             tree,
                    IndentStack*         indentStack = nullptr,
                    _In_opt_ const char* msg         = nullptr,
                    bool                 topOnly     = false,
                    bool                 isLIR       = false);
    void gtGetLclVarNameInfo(unsigned lclNum, const char** ilKindOut, const char** ilNameOut, unsigned* ilNumOut);
    int gtGetLclVarName(unsigned lclNum, char* buf, unsigned buf_remaining);
    char* gtGetLclVarName(unsigned lclNum);
    void gtDispLclVar(unsigned lclNum, bool padForBiggestDisp = true);
    void gtDispLclVarStructType(unsigned lclNum);
    void gtDispClassLayout(ClassLayout* layout, var_types type);
    void gtDispILLocation(const ILLocation& loc);
    void gtDispStmt(Statement* stmt, const char* msg = nullptr);
    void gtDispBlockStmts(BasicBlock* block);
    void gtGetArgMsg(GenTreeCall* call, GenTree* arg, unsigned argNum, char* bufp, unsigned bufLength);
    void gtGetLateArgMsg(GenTreeCall* call, GenTree* arg, int argNum, char* bufp, unsigned bufLength);
    void gtDispArgList(GenTreeCall* call, GenTree* lastCallOperand, IndentStack* indentStack);
    void gtDispAnyFieldSeq(FieldSeqNode* fieldSeq);
    void gtDispFieldSeq(FieldSeqNode* pfsn);

    void gtDispRange(LIR::ReadOnlyRange const& range);

    void gtDispTreeRange(LIR::Range& containingRange, GenTree* tree);

    void gtDispLIRNode(GenTree* node, const char* prefixMsg = nullptr);
#endif

    // For tree walks

    enum fgWalkResult
    {
        WALK_CONTINUE,
        WALK_SKIP_SUBTREES,
        WALK_ABORT
    };
    struct fgWalkData;
    typedef fgWalkResult(fgWalkPreFn)(GenTree** pTree, fgWalkData* data);
    typedef fgWalkResult(fgWalkPostFn)(GenTree** pTree, fgWalkData* data);

    static fgWalkPreFn gtMarkColonCond;
    static fgWalkPreFn gtClearColonCond;

    struct FindLinkData
    {
        GenTree*  nodeToFind;
        GenTree** result;
        GenTree*  parent;
    };

    FindLinkData gtFindLink(Statement* stmt, GenTree* node);
    bool gtHasCatchArg(GenTree* tree);

    typedef ArrayStack<GenTree*> GenTreeStack;

    static bool gtHasCallOnStack(GenTreeStack* parentStack);

    //=========================================================================
    // BasicBlock functions
#ifdef DEBUG
    // This is a debug flag we will use to assert when creating a block
    // during codegen as this interferes with procedure splitting. If you know what you're doing, set
    // it to true before creating the block. (DEBUG only)
    bool fgSafeBasicBlockCreation;
#endif

    BasicBlock* bbNewBasicBlock(BBjumpKinds jumpKind);

    void placeLoopAlignInstructions();

    /*
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XX                                                                           XX
    XX                           LclVarsInfo                                     XX
    XX                                                                           XX
    XX   The variables to be used by the code generator.                         XX
    XX                                                                           XX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    */

    //
    // For both PROMOTION_TYPE_NONE and PROMOTION_TYPE_DEPENDENT the struct will
    // be placed in the stack frame and its fields must be laid out sequentially.
    //
    // For PROMOTION_TYPE_INDEPENDENT each of the struct's fields is replaced by
    // a local variable that can be enregistered or placed in the stack frame.
    // The fields do not need to be laid out sequentially.
    //
    enum lvaPromotionType
    {
        PROMOTION_TYPE_NONE,        // The struct local is not promoted
        PROMOTION_TYPE_INDEPENDENT, // The struct local is promoted,
                                    // and its field locals are independent of its parent struct local.
        PROMOTION_TYPE_DEPENDENT    // The struct local is promoted,
                                    // but its field locals depend on its parent struct local.
    };

    /*****************************************************************************/

    enum FrameLayoutState
    {
        NO_FRAME_LAYOUT,
        INITIAL_FRAME_LAYOUT,
        PRE_REGALLOC_FRAME_LAYOUT,
        REGALLOC_FRAME_LAYOUT,
        TENTATIVE_FRAME_LAYOUT,
        FINAL_FRAME_LAYOUT
    };

public:
    RefCountState lvaRefCountState; // Current local ref count state

    bool lvaLocalVarRefCounted() const
    {
        return lvaRefCountState == RCS_NORMAL;
    }

    bool lvaTrackedFixed; // true: we cannot add new 'tracked' variables

    unsigned lvaCount; // total number of locals, which includes function arguments,
                       // special arguments, IL local variables, and JIT temporary variables

    LclVarDsc* lvaTable;    // variable descriptor table
    unsigned   lvaTableCnt; // lvaTable size (>= lvaCount)

    unsigned lvaTrackedCount;             // actual # of locals being tracked
    unsigned lvaTrackedCountInSizeTUnits; // min # of size_t's sufficient to hold a bit for all the locals being tracked

#ifdef DEBUG
    VARSET_TP lvaTrackedVars; // set of tracked variables
#endif
#ifndef TARGET_64BIT
    VARSET_TP lvaLongVars; // set of long (64-bit) variables
#endif
    VARSET_TP lvaFloatVars; // set of floating-point (32-bit and 64-bit) variables

    unsigned lvaCurEpoch; // VarSets are relative to a specific set of tracked var indices.
                          // If that changes, this changes. VarSets from different epochs
                          // cannot be meaningfully combined.

    unsigned GetCurLVEpoch()
    {
        return lvaCurEpoch;
    }

    // reverse map of tracked number to var number
    unsigned  lvaTrackedToVarNumSize;
    unsigned* lvaTrackedToVarNum;

#if DOUBLE_ALIGN
#ifdef DEBUG
    // # of procs compiled with a double-aligned stack
    static unsigned s_lvaDoubleAlignedProcsCount;
#endif
#endif

    // Getters and setters for address-exposed and do-not-enregister local var properties.
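    // A minimal sketch of typical use (hypothetical call site; the reason enum value is shown for
    // illustration only and is consumed solely in DEBUG builds):
    //
    //   if (!lvaVarAddrExposed(lclNum))
    //   {
    //       lvaSetVarAddrExposed(lclNum DEBUGARG(AddressExposedReason::TOO_CONSERVATIVE));
    //   }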
    bool lvaVarAddrExposed(unsigned varNum) const;
    void lvaSetVarAddrExposed(unsigned varNum DEBUGARG(AddressExposedReason reason));
    void lvaSetHiddenBufferStructArg(unsigned varNum);
    void lvaSetVarLiveInOutOfHandler(unsigned varNum);
    bool lvaVarDoNotEnregister(unsigned varNum);

    void lvSetMinOptsDoNotEnreg();

    bool lvaEnregEHVars;
    bool lvaEnregMultiRegVars;

    void lvaSetVarDoNotEnregister(unsigned varNum DEBUGARG(DoNotEnregisterReason reason));

    unsigned lvaVarargsHandleArg;
#ifdef TARGET_X86
    unsigned lvaVarargsBaseOfStkArgs; // Pointer (computed based on incoming varargs handle) to the start of the stack
                                      // arguments
#endif                                // TARGET_X86

    unsigned lvaInlinedPInvokeFrameVar; // variable representing the InlinedCallFrame
    unsigned lvaReversePInvokeFrameVar; // variable representing the reverse PInvoke frame
#if FEATURE_FIXED_OUT_ARGS
    unsigned lvaPInvokeFrameRegSaveVar; // variable representing the RegSave for PInvoke inlining.
#endif
    unsigned lvaMonAcquired; // boolean variable introduced into synchronized methods
                             // that tracks whether the lock has been taken

    unsigned lvaArg0Var; // The lclNum of arg0. Normally this will be info.compThisArg.
                         // However, if there is a "ldarga 0" or "starg 0" in the IL,
                         // we will redirect all "ldarg(a) 0" and "starg 0" to this temp.

    unsigned lvaInlineeReturnSpillTemp; // The temp to spill the non-VOID return expression
                                        // in case there are multiple BBJ_RETURN blocks in the inlinee
                                        // or if the inlinee has GC ref locals.

#if FEATURE_FIXED_OUT_ARGS
    unsigned            lvaOutgoingArgSpaceVar;  // dummy TYP_LCLBLK var for fixed outgoing argument space
    PhasedVar<unsigned> lvaOutgoingArgSpaceSize; // size of fixed outgoing argument space
#endif                                           // FEATURE_FIXED_OUT_ARGS

    static unsigned GetOutgoingArgByteSize(unsigned sizeWithoutPadding)
    {
        return roundUp(sizeWithoutPadding, TARGET_POINTER_SIZE);
    }

    // Variable representing the return address. The helper-based tailcall
    // mechanism passes the address of the return address to a runtime helper
    // where it is used to detect tail-call chains.
    unsigned lvaRetAddrVar;

#if defined(DEBUG) && defined(TARGET_XARCH)

    unsigned lvaReturnSpCheck; // Stores SP to confirm it is not corrupted on return.

#endif // defined(DEBUG) && defined(TARGET_XARCH)

#if defined(DEBUG) && defined(TARGET_X86)

    unsigned lvaCallSpCheck; // Stores SP to confirm it is not corrupted after every call.

#endif // defined(DEBUG) && defined(TARGET_X86)

    bool lvaGenericsContextInUse;

    bool lvaKeepAliveAndReportThis(); // Synchronized instance method of a reference type, or
                                      // CORINFO_GENERICS_CTXT_FROM_THIS?
    bool lvaReportParamTypeArg();     // Exceptions and CORINFO_GENERICS_CTXT_FROM_PARAMTYPEARG?

    //-------------------------------------------------------------------------
    // All these frame offsets are inter-related and must be kept in sync

#if !defined(FEATURE_EH_FUNCLETS)
    // This is used for the callable handlers
    unsigned lvaShadowSPslotsVar; // TYP_BLK variable for all the shadow SP slots
#endif                            // FEATURE_EH_FUNCLETS

    int lvaCachedGenericContextArgOffs;
    int lvaCachedGenericContextArgOffset(); // For CORINFO_CALLCONV_PARAMTYPE and if generic context is passed as
                                            // THIS pointer

#ifdef JIT32_GCENCODER

    unsigned lvaLocAllocSPvar; // variable which stores the value of ESP after the last alloca/localloc

#endif // JIT32_GCENCODER

    unsigned lvaNewObjArrayArgs; // variable with arguments for new MD array helper

    // TODO-Review: Prior to reg predict we reserve 24 bytes for Spill temps.
// after the reg predict we will use a computed maxTmpSize // which is based upon the number of spill temps predicted by reg predict // All this is necessary because if we under-estimate the size of the spill // temps we could fail when encoding instructions that reference stack offsets for ARM. // // Pre codegen max spill temp size. static const unsigned MAX_SPILL_TEMP_SIZE = 24; //------------------------------------------------------------------------- unsigned lvaGetMaxSpillTempSize(); #ifdef TARGET_ARM bool lvaIsPreSpilled(unsigned lclNum, regMaskTP preSpillMask); #endif // TARGET_ARM void lvaAssignFrameOffsets(FrameLayoutState curState); void lvaFixVirtualFrameOffsets(); void lvaUpdateArgWithInitialReg(LclVarDsc* varDsc); void lvaUpdateArgsWithInitialReg(); void lvaAssignVirtualFrameOffsetsToArgs(); #ifdef UNIX_AMD64_ABI int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs, int* callerArgOffset); #else // !UNIX_AMD64_ABI int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs); #endif // !UNIX_AMD64_ABI void lvaAssignVirtualFrameOffsetsToLocals(); int lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, int stkOffs); #ifdef TARGET_AMD64 // Returns true if compCalleeRegsPushed (including RBP if used as frame pointer) is even. bool lvaIsCalleeSavedIntRegCountEven(); #endif void lvaAlignFrame(); void lvaAssignFrameOffsetsToPromotedStructs(); int lvaAllocateTemps(int stkOffs, bool mustDoubleAlign); #ifdef DEBUG void lvaDumpRegLocation(unsigned lclNum); void lvaDumpFrameLocation(unsigned lclNum); void lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t refCntWtdWidth = 6); void lvaTableDump(FrameLayoutState curState = NO_FRAME_LAYOUT); // NO_FRAME_LAYOUT means use the current frame // layout state defined by lvaDoneFrameLayout #endif // Limit frames size to 1GB. The maximum is 2GB in theory - make it intentionally smaller // to avoid bugs from borderline cases. #define MAX_FrameSize 0x3FFFFFFF void lvaIncrementFrameSize(unsigned size); unsigned lvaFrameSize(FrameLayoutState curState); // Returns the caller-SP-relative offset for the SP/FP relative offset determined by FP based. int lvaToCallerSPRelativeOffset(int offs, bool isFpBased, bool forRootFrame = true) const; // Returns the caller-SP-relative offset for the local variable "varNum." int lvaGetCallerSPRelativeOffset(unsigned varNum); // Returns the SP-relative offset for the local variable "varNum". Illegal to ask this for functions with localloc. int lvaGetSPRelativeOffset(unsigned varNum); int lvaToInitialSPRelativeOffset(unsigned offset, bool isFpBased); int lvaGetInitialSPRelativeOffset(unsigned varNum); // True if this is an OSR compilation and this local is potentially // located on the original method stack frame. 
bool lvaIsOSRLocal(unsigned varNum); //------------------------ For splitting types ---------------------------- void lvaInitTypeRef(); void lvaInitArgs(InitVarDscInfo* varDscInfo); void lvaInitThisPtr(InitVarDscInfo* varDscInfo); void lvaInitRetBuffArg(InitVarDscInfo* varDscInfo, bool useFixedRetBufReg); void lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, unsigned takeArgs); void lvaInitGenericsCtxt(InitVarDscInfo* varDscInfo); void lvaInitVarArgsHandle(InitVarDscInfo* varDscInfo); void lvaInitVarDsc(LclVarDsc* varDsc, unsigned varNum, CorInfoType corInfoType, CORINFO_CLASS_HANDLE typeHnd, CORINFO_ARG_LIST_HANDLE varList, CORINFO_SIG_INFO* varSig); static unsigned lvaTypeRefMask(var_types type); var_types lvaGetActualType(unsigned lclNum); var_types lvaGetRealType(unsigned lclNum); //------------------------------------------------------------------------- void lvaInit(); LclVarDsc* lvaGetDesc(unsigned lclNum) { assert(lclNum < lvaCount); return &lvaTable[lclNum]; } LclVarDsc* lvaGetDesc(unsigned lclNum) const { assert(lclNum < lvaCount); return &lvaTable[lclNum]; } LclVarDsc* lvaGetDesc(const GenTreeLclVarCommon* lclVar) { return lvaGetDesc(lclVar->GetLclNum()); } unsigned lvaTrackedIndexToLclNum(unsigned trackedIndex) { assert(trackedIndex < lvaTrackedCount); unsigned lclNum = lvaTrackedToVarNum[trackedIndex]; assert(lclNum < lvaCount); return lclNum; } LclVarDsc* lvaGetDescByTrackedIndex(unsigned trackedIndex) { return lvaGetDesc(lvaTrackedIndexToLclNum(trackedIndex)); } unsigned lvaGetLclNum(const LclVarDsc* varDsc) { assert((lvaTable <= varDsc) && (varDsc < lvaTable + lvaCount)); // varDsc must point within the table assert(((char*)varDsc - (char*)lvaTable) % sizeof(LclVarDsc) == 0); // varDsc better not point in the middle of a variable unsigned varNum = (unsigned)(varDsc - lvaTable); assert(varDsc == &lvaTable[varNum]); return varNum; } unsigned lvaLclSize(unsigned varNum); unsigned lvaLclExactSize(unsigned varNum); bool lvaHaveManyLocals() const; unsigned lvaGrabTemp(bool shortLifetime DEBUGARG(const char* reason)); unsigned lvaGrabTemps(unsigned cnt DEBUGARG(const char* reason)); unsigned lvaGrabTempWithImplicitUse(bool shortLifetime DEBUGARG(const char* reason)); void lvaSortByRefCount(); void lvaMarkLocalVars(); // Local variable ref-counting void lvaComputeRefCounts(bool isRecompute, bool setSlotNumbers); void lvaMarkLocalVars(BasicBlock* block, bool isRecompute); void lvaAllocOutgoingArgSpaceVar(); // Set up lvaOutgoingArgSpaceVar VARSET_VALRET_TP lvaStmtLclMask(Statement* stmt); #ifdef DEBUG struct lvaStressLclFldArgs { Compiler* m_pCompiler; bool m_bFirstPass; }; static fgWalkPreFn lvaStressLclFldCB; void lvaStressLclFld(); void lvaDispVarSet(VARSET_VALARG_TP set, VARSET_VALARG_TP allVars); void lvaDispVarSet(VARSET_VALARG_TP set); #endif #ifdef TARGET_ARM int lvaFrameAddress(int varNum, bool mustBeFPBased, regNumber* pBaseReg, int addrModeOffset, bool isFloatUsage); #else int lvaFrameAddress(int varNum, bool* pFPbased); #endif bool lvaIsParameter(unsigned varNum); bool lvaIsRegArgument(unsigned varNum); bool lvaIsOriginalThisArg(unsigned varNum); // Is this varNum the original this argument? bool lvaIsOriginalThisReadOnly(); // return true if there is no place in the code // that writes to arg0 // For x64 this is 3, 5, 6, 7, >8 byte structs that are passed by reference. // For ARM64, this is structs larger than 16 bytes that are passed by reference. 
bool lvaIsImplicitByRefLocal(unsigned varNum) { #if defined(TARGET_AMD64) || defined(TARGET_ARM64) LclVarDsc* varDsc = lvaGetDesc(varNum); if (varDsc->lvIsImplicitByRef) { assert(varDsc->lvIsParam); assert(varTypeIsStruct(varDsc) || (varDsc->lvType == TYP_BYREF)); return true; } #endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) return false; } // Returns true if this local var is a multireg struct bool lvaIsMultiregStruct(LclVarDsc* varDsc, bool isVararg); // If the local is a TYP_STRUCT, get/set a class handle describing it CORINFO_CLASS_HANDLE lvaGetStruct(unsigned varNum); void lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool unsafeValueClsCheck, bool setTypeInfo = true); void lvaSetStructUsedAsVarArg(unsigned varNum); // If the local is TYP_REF, set or update the associated class information. void lvaSetClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact = false); void lvaSetClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHandle = nullptr); void lvaUpdateClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact = false); void lvaUpdateClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHandle = nullptr); #define MAX_NumOfFieldsInPromotableStruct 4 // Maximum number of fields in promotable struct // Info about struct type fields. struct lvaStructFieldInfo { CORINFO_FIELD_HANDLE fldHnd; unsigned char fldOffset; unsigned char fldOrdinal; var_types fldType; unsigned fldSize; CORINFO_CLASS_HANDLE fldTypeHnd; lvaStructFieldInfo() : fldHnd(nullptr), fldOffset(0), fldOrdinal(0), fldType(TYP_UNDEF), fldSize(0), fldTypeHnd(nullptr) { } }; // Info about a struct type, instances of which may be candidates for promotion. struct lvaStructPromotionInfo { CORINFO_CLASS_HANDLE typeHnd; bool canPromote; bool containsHoles; bool customLayout; bool fieldsSorted; unsigned char fieldCnt; lvaStructFieldInfo fields[MAX_NumOfFieldsInPromotableStruct]; lvaStructPromotionInfo(CORINFO_CLASS_HANDLE typeHnd = nullptr) : typeHnd(typeHnd) , canPromote(false) , containsHoles(false) , customLayout(false) , fieldsSorted(false) , fieldCnt(0) { } }; struct lvaFieldOffsetCmp { bool operator()(const lvaStructFieldInfo& field1, const lvaStructFieldInfo& field2); }; // This class is responsible for checking validity and profitability of struct promotion. // If it is both legal and profitable, then TryPromoteStructVar promotes the struct and initializes // necessary information for fgMorphStructField to use.
class StructPromotionHelper { public: StructPromotionHelper(Compiler* compiler); bool CanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd); bool TryPromoteStructVar(unsigned lclNum); void Clear() { structPromotionInfo.typeHnd = NO_CLASS_HANDLE; } #ifdef DEBUG void CheckRetypedAsScalar(CORINFO_FIELD_HANDLE fieldHnd, var_types requestedType); #endif // DEBUG private: bool CanPromoteStructVar(unsigned lclNum); bool ShouldPromoteStructVar(unsigned lclNum); void PromoteStructVar(unsigned lclNum); void SortStructFields(); bool CanConstructAndPromoteField(lvaStructPromotionInfo* structPromotionInfo); lvaStructFieldInfo GetFieldInfo(CORINFO_FIELD_HANDLE fieldHnd, BYTE ordinal); bool TryPromoteStructField(lvaStructFieldInfo& outerFieldInfo); private: Compiler* compiler; lvaStructPromotionInfo structPromotionInfo; #ifdef DEBUG typedef JitHashTable<CORINFO_FIELD_HANDLE, JitPtrKeyFuncs<CORINFO_FIELD_STRUCT_>, var_types> RetypedAsScalarFieldsMap; RetypedAsScalarFieldsMap retypedFieldsMap; #endif // DEBUG }; StructPromotionHelper* structPromotionHelper; unsigned lvaGetFieldLocal(const LclVarDsc* varDsc, unsigned int fldOffset); lvaPromotionType lvaGetPromotionType(const LclVarDsc* varDsc); lvaPromotionType lvaGetPromotionType(unsigned varNum); lvaPromotionType lvaGetParentPromotionType(const LclVarDsc* varDsc); lvaPromotionType lvaGetParentPromotionType(unsigned varNum); bool lvaIsFieldOfDependentlyPromotedStruct(const LclVarDsc* varDsc); bool lvaIsGCTracked(const LclVarDsc* varDsc); #if defined(FEATURE_SIMD) bool lvaMapSimd12ToSimd16(const LclVarDsc* varDsc) { assert(varDsc->lvType == TYP_SIMD12); assert(varDsc->lvExactSize == 12); #if defined(TARGET_64BIT) assert(compMacOsArm64Abi() || varDsc->lvSize() == 16); #endif // defined(TARGET_64BIT) // We make local variable SIMD12 types 16 bytes instead of just 12. // lvSize() will return 16 bytes for SIMD12, even for fields. // However, we can't do that mapping if the var is a dependently promoted struct field. // Such a field must remain its exact size within its parent struct unless it is a single // field *and* it is the only field in a struct of 16 bytes. if (varDsc->lvSize() != 16) { return false; } if (lvaIsFieldOfDependentlyPromotedStruct(varDsc)) { LclVarDsc* parentVarDsc = lvaGetDesc(varDsc->lvParentLcl); return (parentVarDsc->lvFieldCnt == 1) && (parentVarDsc->lvSize() == 16); } return true; } #endif // defined(FEATURE_SIMD) unsigned lvaGSSecurityCookie; // LclVar number bool lvaTempsHaveLargerOffsetThanVars(); // Returns "true" iff local variable "lclNum" is in SSA form. bool lvaInSsa(unsigned lclNum) { assert(lclNum < lvaCount); return lvaTable[lclNum].lvInSsa; } unsigned lvaStubArgumentVar; // variable representing the secret stub argument coming in EAX #if defined(FEATURE_EH_FUNCLETS) unsigned lvaPSPSym; // variable representing the PSPSym #endif InlineInfo* impInlineInfo; // Only present for inlinees InlineStrategy* m_inlineStrategy; InlineContext* compInlineContext; // Always present // The Compiler* that is the root of the inlining tree of which "this" is a member. Compiler* impInlineRoot(); #if defined(DEBUG) || defined(INLINE_DATA) unsigned __int64 getInlineCycleCount() { return m_compCycles; } #endif // defined(DEBUG) || defined(INLINE_DATA) bool fgNoStructPromotion; // Set to TRUE to turn off struct promotion for this method. bool fgNoStructParamPromotion; // Set to TRUE to turn off struct promotion for parameters this method. 
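// An illustrative sketch (hypothetical names, not a quote of any existing call site): given a TYP_STRUCT
// local 'lclNum' whose class handle is 'typeHnd', promotion through the StructPromotionHelper declared
// above would be driven roughly as follows:
//
//     if (structPromotionHelper->CanPromoteStructType(typeHnd))
//     {
//         structPromotionHelper->TryPromoteStructVar(lclNum);
//     }
//
// Judging from the private helpers listed in the class, TryPromoteStructVar is expected to apply the
// per-local CanPromoteStructVar/ShouldPromoteStructVar checks itself before performing the promotion.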
//========================================================================= // PROTECTED //========================================================================= protected: //---------------- Local variable ref-counting ---------------------------- void lvaMarkLclRefs(GenTree* tree, BasicBlock* block, Statement* stmt, bool isRecompute); bool IsDominatedByExceptionalEntry(BasicBlock* block); void SetVolatileHint(LclVarDsc* varDsc); // Keeps the mapping from SSA #'s to VN's for the implicit memory variables. SsaDefArray<SsaMemDef> lvMemoryPerSsaData; public: // Returns the address of the per-Ssa data for memory at the given ssaNum (which is required // not to be the SsaConfig::RESERVED_SSA_NUM, which indicates that the variable is // not an SSA variable). SsaMemDef* GetMemoryPerSsaData(unsigned ssaNum) { return lvMemoryPerSsaData.GetSsaDef(ssaNum); } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Importer XX XX XX XX Imports the given method and converts it to semantic trees XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ private: // For prefixFlags enum { PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix PREFIX_TAILCALL_IMPLICIT = 0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix PREFIX_TAILCALL_STRESS = 0x00000100, // call doesn't "tail" IL prefix but is treated as explicit because of tail call stress PREFIX_TAILCALL = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT | PREFIX_TAILCALL_STRESS), PREFIX_VOLATILE = 0x00001000, PREFIX_UNALIGNED = 0x00010000, PREFIX_CONSTRAINED = 0x00100000, PREFIX_READONLY = 0x01000000 }; static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix); static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp); static bool impOpcodeIsCallOpcode(OPCODE opcode); public: void impInit(); void impImport(); CORINFO_CLASS_HANDLE impGetRefAnyClass(); CORINFO_CLASS_HANDLE impGetRuntimeArgumentHandle(); CORINFO_CLASS_HANDLE impGetTypeHandleClass(); CORINFO_CLASS_HANDLE impGetStringClass(); CORINFO_CLASS_HANDLE impGetObjectClass(); // Returns underlying type of handles returned by ldtoken instruction var_types GetRuntimeHandleUnderlyingType() { // RuntimeTypeHandle is backed by raw pointer on CoreRT and by object reference on other runtimes return IsTargetAbi(CORINFO_CORERT_ABI) ? TYP_I_IMPL : TYP_REF; } void impDevirtualizeCall(GenTreeCall* call, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_METHOD_HANDLE* method, unsigned* methodFlags, CORINFO_CONTEXT_HANDLE* contextHandle, CORINFO_CONTEXT_HANDLE* exactContextHandle, bool isLateDevirtualization, bool isExplicitTailCall, IL_OFFSET ilOffset = BAD_IL_OFFSET); //========================================================================= // PROTECTED //========================================================================= protected: //-------------------- Stack manipulation --------------------------------- unsigned impStkSize; // Size of the full stack #define SMALL_STACK_SIZE 16 // number of elements in impSmallStack struct SavedStack // used to save/restore stack contents. 
{ unsigned ssDepth; // number of values on stack StackEntry* ssTrees; // saved tree values }; bool impIsPrimitive(CorInfoType type); bool impILConsumesAddr(const BYTE* codeAddr); void impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind); void impPushOnStack(GenTree* tree, typeInfo ti); void impPushNullObjRefOnStack(); StackEntry impPopStack(); StackEntry& impStackTop(unsigned n = 0); unsigned impStackHeight(); void impSaveStackState(SavedStack* savePtr, bool copy); void impRestoreStackState(SavedStack* savePtr); GenTree* impImportLdvirtftn(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); int impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken, const BYTE* codeAddr, const BYTE* codeEndp, bool makeInlineObservation = false); void impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken); void impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); bool impCanPInvokeInline(); bool impCanPInvokeInlineCallSite(BasicBlock* block); void impCheckForPInvokeCall( GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block); GenTreeCall* impImportIndirectCall(CORINFO_SIG_INFO* sig, const DebugInfo& di = DebugInfo()); void impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig); void impInsertHelperCall(CORINFO_HELPER_DESC* helperCall); void impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall); void impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall); var_types impImportCall(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a // type parameter? 
GenTree* newobjThis, int prefixFlags, CORINFO_CALL_INFO* callInfo, IL_OFFSET rawILOffset); CORINFO_CLASS_HANDLE impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE specialIntrinsicHandle); bool impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo, CorInfoCallConvExtension callConv); GenTree* impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd); GenTree* impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension unmgdCallConv); #ifdef DEBUG var_types impImportJitTestLabelMark(int numArgs); #endif // DEBUG GenTree* impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken); GenTree* impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp); GenTree* impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS access, CORINFO_FIELD_INFO* pFieldInfo, var_types lclTyp); static void impBashVarAddrsToI(GenTree* tree1, GenTree* tree2 = nullptr); GenTree* impImplicitIorI4Cast(GenTree* tree, var_types dstTyp); GenTree* impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp); void impImportLeave(BasicBlock* block); void impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr); GenTree* impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom); // Mirrors StringComparison.cs enum StringComparison { Ordinal = 4, OrdinalIgnoreCase = 5 }; enum StringComparisonJoint { Eq, // (d1 == cns1) && (s2 == cns2) Xor, // (d1 ^ cns1) | (s2 ^ cns2) }; GenTree* impStringEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* sig, unsigned methodFlags); GenTree* impSpanEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* sig, unsigned methodFlags); GenTree* impExpandHalfConstEquals(GenTreeLclVar* data, GenTree* lengthFld, bool checkForNull, bool startsWith, WCHAR* cnsData, int len, int dataOffset, StringComparison cmpMode); GenTree* impCreateCompareInd(GenTreeLclVar* obj, var_types type, ssize_t offset, ssize_t value, StringComparison ignoreCase, StringComparisonJoint joint = Eq); GenTree* impExpandHalfConstEqualsSWAR( GenTreeLclVar* data, WCHAR* cns, int len, int dataOffset, StringComparison cmpMode); GenTree* impExpandHalfConstEqualsSIMD( GenTreeLclVar* data, WCHAR* cns, int len, int dataOffset, StringComparison cmpMode); GenTreeStrCon* impGetStrConFromSpan(GenTree* span); GenTree* impIntrinsic(GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, unsigned methodFlags, int memberRef, bool readonlyCall, bool tailCall, CORINFO_RESOLVED_TOKEN* pContstrainedResolvedToken, CORINFO_THIS_TRANSFORM constraintCallThisTransform, NamedIntrinsic* pIntrinsicName, bool* isSpecialIntrinsic = nullptr); GenTree* impMathIntrinsic(CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, var_types callType, NamedIntrinsic intrinsicName, bool tailCall); NamedIntrinsic lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method); GenTree* impUnsupportedNamedIntrinsic(unsigned helper, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, bool mustExpand); #ifdef FEATURE_HW_INTRINSICS GenTree* impHWIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, bool mustExpand); GenTree* impSimdAsHWIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, GenTree* newobjThis); protected: bool compSupportsHWIntrinsic(CORINFO_InstructionSet isa); GenTree* impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, var_types retType, CorInfoType simdBaseJitType, 
unsigned simdSize, GenTree* newobjThis); GenTree* impSpecialIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType, var_types retType, unsigned simdSize); GenTree* getArgForHWIntrinsic(var_types argType, CORINFO_CLASS_HANDLE argClass, bool expectAddr = false, GenTree* newobjThis = nullptr); GenTree* impNonConstFallback(NamedIntrinsic intrinsic, var_types simdType, CorInfoType simdBaseJitType); GenTree* addRangeCheckIfNeeded( NamedIntrinsic intrinsic, GenTree* immOp, bool mustExpand, int immLowerBound, int immUpperBound); GenTree* addRangeCheckForHWIntrinsic(GenTree* immOp, int immLowerBound, int immUpperBound); #ifdef TARGET_XARCH GenTree* impBaseIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType, var_types retType, unsigned simdSize); GenTree* impSSEIntrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); GenTree* impSSE2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); GenTree* impAvxOrAvx2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); GenTree* impBMI1OrBMI2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); #endif // TARGET_XARCH #endif // FEATURE_HW_INTRINSICS GenTree* impArrayAccessIntrinsic(CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, NamedIntrinsic intrinsicName); GenTree* impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig); GenTree* impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig); GenTree* impKeepAliveIntrinsic(GenTree* objToKeepAlive); GenTree* impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); GenTree* impTransformThis(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, CORINFO_THIS_TRANSFORM transform); //----------------- Manipulating the trees and stmts ---------------------- Statement* impStmtList; // Statements for the BB being imported. Statement* impLastStmt; // The last statement for the current BB. 
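// An illustrative sketch (hypothetical tree 'op1'; the members it uses are declared just below): importer
// code that has built a tree with side effects typically appends it to the statement list of the block
// being imported with something like
//
//     impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI);
//
// where CHECK_SPILL_ALL asks for the evaluation stack to be checked (and spilled if needed) for
// interference with the appended tree, and impCurStmtDI supplies the debug info for the new statement.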
public: enum { CHECK_SPILL_ALL = -1, CHECK_SPILL_NONE = -2 }; void impBeginTreeList(); void impEndTreeList(BasicBlock* block, Statement* firstStmt, Statement* lastStmt); void impEndTreeList(BasicBlock* block); void impAppendStmtCheck(Statement* stmt, unsigned chkLevel); void impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsumedDebugInfo = true); void impAppendStmt(Statement* stmt); void impInsertStmtBefore(Statement* stmt, Statement* stmtBefore); Statement* impAppendTree(GenTree* tree, unsigned chkLevel, const DebugInfo& di, bool checkConsumedDebugInfo = true); void impInsertTreeBefore(GenTree* tree, const DebugInfo& di, Statement* stmtBefore); void impAssignTempGen(unsigned tmp, GenTree* val, unsigned curLevel = (unsigned)CHECK_SPILL_NONE, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); void impAssignTempGen(unsigned tmpNum, GenTree* val, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); Statement* impExtractLastStmt(); GenTree* impCloneExpr(GenTree* tree, GenTree** clone, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt DEBUGARG(const char* reason)); GenTree* impAssignStruct(GenTree* dest, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); GenTree* impAssignStructPtr(GenTree* dest, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); GenTree* impGetStructAddr(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool willDeref); var_types impNormStructType(CORINFO_CLASS_HANDLE structHnd, CorInfoType* simdBaseJitType = nullptr); GenTree* impNormStructVal(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool forceNormalization = false); GenTree* impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool* pRuntimeLookup = nullptr, bool mustRestoreHandle = false, bool importParent = false); GenTree* impParentClassTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool* pRuntimeLookup = nullptr, bool mustRestoreHandle = false) { return impTokenToHandle(pResolvedToken, pRuntimeLookup, mustRestoreHandle, true); } GenTree* impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags flags, void* compileTimeHandle); GenTree* getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind); GenTree* impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle); GenTree* impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup, GenTreeFlags flags, void* compileTimeHandle); GenTreeCall* impReadyToRunHelperToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoHelpFunc helper, var_types type, GenTreeCall::Use* args = nullptr, CORINFO_LOOKUP_KIND* pGenericLookupKind = nullptr); bool impIsCastHelperEligibleForClassProbe(GenTree* tree); bool impIsCastHelperMayHaveProfileData(GenTree* tree); GenTree* impCastClassOrIsInstToTree( GenTree* op1, GenTree* op2, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass, IL_OFFSET ilOffset); GenTree* impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass); bool VarTypeIsMultiByteAndCanEnreg(var_types type, CORINFO_CLASS_HANDLE typeClass, unsigned* typeSize, bool forReturn, bool isVarArg, 
CorInfoCallConvExtension callConv); bool IsIntrinsicImplementedByUserCall(NamedIntrinsic intrinsicName); bool IsTargetIntrinsic(NamedIntrinsic intrinsicName); bool IsMathIntrinsic(NamedIntrinsic intrinsicName); bool IsMathIntrinsic(GenTree* tree); private: //----------------- Importing the method ---------------------------------- CORINFO_CONTEXT_HANDLE impTokenLookupContextHandle; // The context used for looking up tokens. #ifdef DEBUG unsigned impCurOpcOffs; const char* impCurOpcName; bool impNestedStackSpill; // For displaying instrs with generated native code (-n:B) Statement* impLastILoffsStmt; // oldest stmt added for which we did not call SetLastILOffset(). void impNoteLastILoffs(); #endif // Debug info of current statement being imported. It gets set to contain // no IL location (!impCurStmtDI.GetLocation().IsValid) after it has been // set in the appended trees. Then it gets updated at IL instructions for // which we have to report mapping info. // It will always contain the current inline context. DebugInfo impCurStmtDI; DebugInfo impCreateDIWithCurrentStackInfo(IL_OFFSET offs, bool isCall); void impCurStmtOffsSet(IL_OFFSET offs); void impNoteBranchOffs(); unsigned impInitBlockLineInfo(); bool impIsThis(GenTree* obj); bool impIsLDFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr); bool impIsDUP_LDVIRTFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr); bool impIsAnySTLOC(OPCODE opcode) { return ((opcode == CEE_STLOC) || (opcode == CEE_STLOC_S) || ((opcode >= CEE_STLOC_0) && (opcode <= CEE_STLOC_3))); } GenTreeCall::Use* impPopCallArgs(unsigned count, CORINFO_SIG_INFO* sig, GenTreeCall::Use* prefixArgs = nullptr); bool impCheckImplicitArgumentCoercion(var_types sigType, var_types nodeType) const; GenTreeCall::Use* impPopReverseCallArgs(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount = 0); //---------------- Spilling the importer stack ---------------------------- // The maximum number of bytes of IL processed without clean stack state. // It allows to limit the maximum tree size and depth. static const unsigned MAX_TREE_SIZE = 200; bool impCanSpillNow(OPCODE prevOpcode); struct PendingDsc { PendingDsc* pdNext; BasicBlock* pdBB; SavedStack pdSavedStack; ThisInitState pdThisPtrInit; }; PendingDsc* impPendingList; // list of BBs currently waiting to be imported. PendingDsc* impPendingFree; // Freed up dscs that can be reused // We keep a byte-per-block map (dynamically extended) in the top-level Compiler object of a compilation. JitExpandArray<BYTE> impPendingBlockMembers; // Return the byte for "b" (allocating/extending impPendingBlockMembers if necessary.) // Operates on the map in the top-level ancestor. BYTE impGetPendingBlockMember(BasicBlock* blk) { return impInlineRoot()->impPendingBlockMembers.Get(blk->bbInd()); } // Set the byte for "b" to "val" (allocating/extending impPendingBlockMembers if necessary.) // Operates on the map in the top-level ancestor. 
void impSetPendingBlockMember(BasicBlock* blk, BYTE val) { impInlineRoot()->impPendingBlockMembers.Set(blk->bbInd(), val); } bool impCanReimport; bool impSpillStackEntry(unsigned level, unsigned varNum #ifdef DEBUG , bool bAssertOnRecursion, const char* reason #endif ); void impSpillStackEnsure(bool spillLeaves = false); void impEvalSideEffects(); void impSpillSpecialSideEff(); void impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason)); void impSpillValueClasses(); void impSpillEvalStack(); static fgWalkPreFn impFindValueClasses; void impSpillLclRefs(ssize_t lclNum); BasicBlock* impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter); bool impBlockIsInALoop(BasicBlock* block); void impImportBlockCode(BasicBlock* block); void impReimportMarkBlock(BasicBlock* block); void impReimportMarkSuccessors(BasicBlock* block); void impVerifyEHBlock(BasicBlock* block, bool isTryStart); void impImportBlockPending(BasicBlock* block); // Similar to impImportBlockPending, but assumes that block has already been imported once and is being // reimported for some reason. It specifically does *not* look at verCurrentState to set the EntryState // for the block, but instead, just re-uses the block's existing EntryState. void impReimportBlockPending(BasicBlock* block); var_types impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2); void impImportBlock(BasicBlock* block); // Assumes that "block" is a basic block that completes with a non-empty stack. We will assign the values // on the stack to local variables (the "spill temp" variables). The successor blocks will assume that // its incoming stack contents are in those locals. This requires "block" and its successors to agree on // the variables that will be used -- and for all the predecessors of those successors, and the // successors of those predecessors, etc. Call such a set of blocks closed under alternating // successor/predecessor edges a "spill clique." A block is a "predecessor" or "successor" member of the // clique (or, conceivably, both). Each block has a specified sequence of incoming and outgoing spill // temps. If "block" already has its outgoing spill temps assigned (they are always a contiguous series // of local variable numbers, so we represent them with the base local variable number), returns that. // Otherwise, picks a set of spill temps, and propagates this choice to all blocks in the spill clique of // which "block" is a member (asserting, in debug mode, that no block in this clique had its spill temps // chosen already. More precisely, that the incoming or outgoing spill temps are not chosen, depending // on which kind of member of the clique the block is). unsigned impGetSpillTmpBase(BasicBlock* block); // Assumes that "block" is a basic block that completes with a non-empty stack. We have previously // assigned the values on the stack to local variables (the "spill temp" variables). The successor blocks // will assume that its incoming stack contents are in those locals. This requires "block" and its // successors to agree on the variables and their types that will be used. The CLI spec allows implicit // conversions between 'int' and 'native int' or 'float' and 'double' stack types. So one predecessor can // push an int and another can push a native int. 
For 64-bit we have chosen to implement this by typing // the "spill temp" as native int, and then importing (or re-importing as needed) so that all the // predecessors in the "spill clique" push a native int (sign-extending if needed), and all the // successors receive a native int. Similarly float and double are unified to double. // This routine is called after a type-mismatch is detected, and it will walk the spill clique to mark // blocks for re-importation as appropriate (both successors, so they get the right incoming type, and // predecessors, so they insert an upcast if needed). void impReimportSpillClique(BasicBlock* block); // When we compute a "spill clique" (see above) these byte-maps are allocated to have a byte per basic // block, and represent the predecessor and successor members of the clique currently being computed. // *** Access to these will need to be locked in a parallel compiler. JitExpandArray<BYTE> impSpillCliquePredMembers; JitExpandArray<BYTE> impSpillCliqueSuccMembers; enum SpillCliqueDir { SpillCliquePred, SpillCliqueSucc }; // Abstract class for receiving a callback while walking a spill clique class SpillCliqueWalker { public: virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk) = 0; }; // This class is used for setting the bbStkTempsIn and bbStkTempsOut on the blocks within a spill clique class SetSpillTempsBase : public SpillCliqueWalker { unsigned m_baseTmp; public: SetSpillTempsBase(unsigned baseTmp) : m_baseTmp(baseTmp) { } virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk); }; // This class is used for implementing impReimportSpillClique part on each block within the spill clique class ReimportSpillClique : public SpillCliqueWalker { Compiler* m_pComp; public: ReimportSpillClique(Compiler* pComp) : m_pComp(pComp) { } virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk); }; // This is the heart of the algorithm for walking spill cliques. It invokes callback->Visit for each // predecessor or successor within the spill clique void impWalkSpillCliqueFromPred(BasicBlock* pred, SpillCliqueWalker* callback); // For a BasicBlock that has already been imported, the EntryState has an array of GenTrees for the // incoming locals. This walks that list and resets the types of the GenTrees to match the types of // the VarDscs. They get out of sync when we have int/native int issues (see impReimportSpillClique). void impRetypeEntryStateTemps(BasicBlock* blk); BYTE impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk); void impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val); void impPushVar(GenTree* op, typeInfo tiRetVal); GenTreeLclVar* impCreateLocalNode(unsigned lclNum DEBUGARG(IL_OFFSET offset)); void impLoadVar(unsigned lclNum, IL_OFFSET offset, const typeInfo& tiRetVal); void impLoadVar(unsigned lclNum, IL_OFFSET offset) { impLoadVar(lclNum, offset, lvaGetDesc(lclNum)->lvVerTypeInfo); } void impLoadArg(unsigned ilArgNum, IL_OFFSET offset); void impLoadLoc(unsigned ilLclNum, IL_OFFSET offset); bool impReturnInstruction(int prefixFlags, OPCODE& opcode); #ifdef TARGET_ARM void impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* op, CORINFO_CLASS_HANDLE hClass); #endif // A free list of linked list nodes used to represent to-do stacks of basic blocks.
struct BlockListNode { BasicBlock* m_blk; BlockListNode* m_next; BlockListNode(BasicBlock* blk, BlockListNode* next = nullptr) : m_blk(blk), m_next(next) { } void* operator new(size_t sz, Compiler* comp); }; BlockListNode* impBlockListNodeFreeList; void FreeBlockListNode(BlockListNode* node); bool impIsValueType(typeInfo* pTypeInfo); var_types mangleVarArgsType(var_types type); regNumber getCallArgIntRegister(regNumber floatReg); regNumber getCallArgFloatRegister(regNumber intReg); #if defined(DEBUG) static unsigned jitTotalMethodCompiled; #endif #ifdef DEBUG static LONG jitNestingLevel; #endif // DEBUG static bool impIsAddressInLocal(const GenTree* tree, GenTree** lclVarTreeOut = nullptr); void impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult); // STATIC inlining decision based on the IL code. void impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle, CORINFO_METHOD_INFO* methInfo, bool forceInline, InlineResult* inlineResult); void impCheckCanInline(GenTreeCall* call, CORINFO_METHOD_HANDLE fncHandle, unsigned methAttr, CORINFO_CONTEXT_HANDLE exactContextHnd, InlineCandidateInfo** ppInlineCandidateInfo, InlineResult* inlineResult); void impInlineRecordArgInfo(InlineInfo* pInlineInfo, GenTree* curArgVal, unsigned argNum, InlineResult* inlineResult); void impInlineInitVars(InlineInfo* pInlineInfo); unsigned impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason)); GenTree* impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclTypeInfo); bool impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo); bool impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree* additionalTree, GenTreeCall::Use* additionalCallArgs, GenTree* dereferencedAddress, InlArgInfo* inlArgInfo); void impMarkInlineCandidate(GenTree* call, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo); void impMarkInlineCandidateHelper(GenTreeCall* call, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo); bool impTailCallRetTypeCompatible(bool allowWidening, var_types callerRetType, CORINFO_CLASS_HANDLE callerRetTypeClass, CorInfoCallConvExtension callerCallConv, var_types calleeRetType, CORINFO_CLASS_HANDLE calleeRetTypeClass, CorInfoCallConvExtension calleeCallConv); bool impIsTailCallILPattern( bool tailPrefixed, OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, bool isRecursive); bool impIsImplicitTailCallCandidate( OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive); bool impIsClassExact(CORINFO_CLASS_HANDLE classHnd); bool impCanSkipCovariantStoreCheck(GenTree* value, GenTree* array); CORINFO_RESOLVED_TOKEN* impAllocateToken(const CORINFO_RESOLVED_TOKEN& token); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX FlowGraph XX XX XX XX Info about the basic-blocks, their contents and the flow analysis XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: BasicBlock* fgFirstBB; // Beginning of the basic block list BasicBlock* fgLastBB; // End of the basic block list BasicBlock* fgFirstColdBlock; // First block to be placed in the cold section BasicBlock* fgEntryBB; // For OSR, the original method's entry point BasicBlock* fgOSREntryBB; 
// For OSR, the logical entry point (~ patchpoint) #if defined(FEATURE_EH_FUNCLETS) BasicBlock* fgFirstFuncletBB; // First block of outlined funclets (to allow block insertion before the funclets) #endif BasicBlock* fgFirstBBScratch; // Block inserted for initialization stuff. Is nullptr if no such block has been // created. BasicBlockList* fgReturnBlocks; // list of BBJ_RETURN blocks unsigned fgEdgeCount; // # of control flow edges between the BBs unsigned fgBBcount; // # of BBs in the method #ifdef DEBUG unsigned fgBBcountAtCodegen; // # of BBs in the method at the start of codegen #endif unsigned fgBBNumMax; // The max bbNum that has been assigned to basic blocks unsigned fgDomBBcount; // # of BBs for which we have dominator and reachability information BasicBlock** fgBBInvPostOrder; // The flow graph stored in an array sorted in topological order, needed to compute // dominance. Indexed by block number. Size: fgBBNumMax + 1. // After the dominance tree is computed, we cache a DFS preorder number and DFS postorder number to compute // dominance queries in O(1). fgDomTreePreOrder and fgDomTreePostOrder are arrays giving the block's preorder and // postorder number, respectively. The arrays are indexed by basic block number. (Note that blocks are numbered // starting from one. Thus, we always waste element zero. This makes debugging easier and makes the code less likely // to suffer from bugs stemming from forgetting to add or subtract one from the block number to form an array // index). The arrays are of size fgBBNumMax + 1. unsigned* fgDomTreePreOrder; unsigned* fgDomTreePostOrder; // Dominator tree used by SSA construction and copy propagation (the two are expected to use the same tree // in order to avoid the need for SSA reconstruction and an "out of SSA" phase). DomTreeNode* fgSsaDomTree; bool fgBBVarSetsInited; // Allocate array like T* a = new T[fgBBNumMax + 1]; // Using helper so we don't keep forgetting +1. template <typename T> T* fgAllocateTypeForEachBlk(CompMemKind cmk = CMK_Unknown) { return getAllocator(cmk).allocate<T>(fgBBNumMax + 1); } // BlockSets are relative to a specific set of BasicBlock numbers. If that changes // (if the blocks are renumbered), this changes. BlockSets from different epochs // cannot be meaningfully combined. Note that new blocks can be created with higher // block numbers without changing the basic block epoch. These blocks *cannot* // participate in a block set until the blocks are all renumbered, causing the epoch // to change. This is useful if continuing to use previous block sets is valuable. // If the epoch is zero, then it is uninitialized, and block sets can't be used. unsigned fgCurBBEpoch; unsigned GetCurBasicBlockEpoch() { return fgCurBBEpoch; } // The number of basic blocks in the current epoch. When the blocks are renumbered, // this is fgBBcount. As blocks are added, fgBBcount increases, fgCurBBEpochSize remains // the same, until a new BasicBlock epoch is created, such as when the blocks are all renumbered. unsigned fgCurBBEpochSize; // The number of "size_t" elements required to hold a bitset large enough for fgCurBBEpochSize // bits. This is precomputed to avoid doing math every time BasicBlockBitSetTraits::GetArrSize() is called. unsigned fgBBSetCountInSizeTUnits; void NewBasicBlockEpoch() { INDEBUG(unsigned oldEpochArrSize = fgBBSetCountInSizeTUnits); // We have a new epoch. Compute and cache the size needed for new BlockSets. 
fgCurBBEpoch++; fgCurBBEpochSize = fgBBNumMax + 1; fgBBSetCountInSizeTUnits = roundUp(fgCurBBEpochSize, (unsigned)(sizeof(size_t) * 8)) / unsigned(sizeof(size_t) * 8); #ifdef DEBUG // All BlockSet objects are now invalid! fgReachabilitySetsValid = false; // the bbReach sets are now invalid! fgEnterBlksSetValid = false; // the fgEnterBlks set is now invalid! if (verbose) { unsigned epochArrSize = BasicBlockBitSetTraits::GetArrSize(this, sizeof(size_t)); printf("\nNew BlockSet epoch %d, # of blocks (including unused BB00): %u, bitset array size: %u (%s)", fgCurBBEpoch, fgCurBBEpochSize, epochArrSize, (epochArrSize <= 1) ? "short" : "long"); if ((fgCurBBEpoch != 1) && ((oldEpochArrSize <= 1) != (epochArrSize <= 1))) { // If we're not just establishing the first epoch, and the epoch array size has changed such that we're // going to change our bitset representation from short (just a size_t bitset) to long (a pointer to an // array of size_t bitsets), then print that out. printf("; NOTE: BlockSet size was previously %s!", (oldEpochArrSize <= 1) ? "short" : "long"); } printf("\n"); } #endif // DEBUG } void EnsureBasicBlockEpoch() { if (fgCurBBEpochSize != fgBBNumMax + 1) { NewBasicBlockEpoch(); } } BasicBlock* fgNewBasicBlock(BBjumpKinds jumpKind); void fgEnsureFirstBBisScratch(); bool fgFirstBBisScratch(); bool fgBBisScratch(BasicBlock* block); void fgExtendEHRegionBefore(BasicBlock* block); void fgExtendEHRegionAfter(BasicBlock* block); BasicBlock* fgNewBBbefore(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion); BasicBlock* fgNewBBafter(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion); BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind, unsigned tryIndex, unsigned hndIndex, BasicBlock* nearBlk, bool putInFilter = false, bool runRarely = false, bool insertAtEnd = false); BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind, BasicBlock* srcBlk, bool runRarely = false, bool insertAtEnd = false); BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind); BasicBlock* fgNewBBinRegionWorker(BBjumpKinds jumpKind, BasicBlock* afterBlk, unsigned xcptnIndex, bool putInTryRegion); void fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk); void fgInsertBBafter(BasicBlock* insertAfterBlk, BasicBlock* newBlk); void fgUnlinkBlock(BasicBlock* block); #ifdef FEATURE_JIT_METHOD_PERF unsigned fgMeasureIR(); #endif // FEATURE_JIT_METHOD_PERF bool fgModified; // True if the flow graph has been modified recently bool fgComputePredsDone; // Have we computed the bbPreds list bool fgCheapPredsValid; // Is the bbCheapPreds list valid? bool fgDomsComputed; // Have we computed the dominator sets? bool fgReturnBlocksComputed; // Have we computed the return blocks list? bool fgOptimizedFinally; // Did we optimize any try-finallys? bool fgHasSwitch; // any BBJ_SWITCH jumps? BlockSet fgEnterBlks; // Set of blocks which have a special transfer of control; the "entry" blocks plus EH handler // begin blocks. #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) BlockSet fgAlwaysBlks; // Set of blocks which are BBJ_ALWAYS part of BBJ_CALLFINALLY/BBJ_ALWAYS pair that should // never be removed due to a requirement to use the BBJ_ALWAYS for generating code and // not have "retless" blocks. #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #ifdef DEBUG bool fgReachabilitySetsValid; // Are the bbReach sets valid? bool fgEnterBlksSetValid; // Is the fgEnterBlks set valid? 
#endif // DEBUG bool fgRemoveRestOfBlock; // true if we know that we will throw bool fgStmtRemoved; // true if we remove statements -> need new DFA // There are two modes for ordering of the trees. // - In FGOrderTree, the dominant ordering is the tree order, and the nodes contained in // each tree and sub-tree are contiguous, and can be traversed (in gtNext/gtPrev order) // by traversing the tree according to the order of the operands. // - In FGOrderLinear, the dominant ordering is the linear order. enum FlowGraphOrder { FGOrderTree, FGOrderLinear }; FlowGraphOrder fgOrder; // The following are boolean flags that keep track of the state of internal data structures bool fgStmtListThreaded; // true if the node list is now threaded bool fgCanRelocateEHRegions; // true if we are allowed to relocate the EH regions bool fgEdgeWeightsComputed; // true after we have called fgComputeEdgeWeights bool fgHaveValidEdgeWeights; // true if we were successful in computing all of the edge weights bool fgSlopUsedInEdgeWeights; // true if there was some slop used when computing the edge weights bool fgRangeUsedInEdgeWeights; // true if some of the edgeWeight are expressed in Min..Max form bool fgNeedsUpdateFlowGraph; // true if we need to run fgUpdateFlowGraph weight_t fgCalledCount; // count of the number of times this method was called // This is derived from the profile data // or is BB_UNITY_WEIGHT when we don't have profile data #if defined(FEATURE_EH_FUNCLETS) bool fgFuncletsCreated; // true if the funclet creation phase has been run #endif // FEATURE_EH_FUNCLETS bool fgGlobalMorph; // indicates if we are in the global morphing phase // since fgMorphTree can be called from several places bool impBoxTempInUse; // the temp below is valid and available unsigned impBoxTemp; // a temporary that is used for boxing #ifdef DEBUG bool jitFallbackCompile; // Are we doing a fallback compile? That is, have we executed a NO_WAY assert, // and we are trying to compile again in a "safer", minopts mode? #endif #if defined(DEBUG) unsigned impInlinedCodeSize; bool fgPrintInlinedMethods; #endif jitstd::vector<flowList*>* fgPredListSortVector; //------------------------------------------------------------------------- void fgInit(); PhaseStatus fgImport(); PhaseStatus fgTransformIndirectCalls(); PhaseStatus fgTransformPatchpoints(); PhaseStatus fgInline(); PhaseStatus fgRemoveEmptyTry(); PhaseStatus fgRemoveEmptyFinally(); PhaseStatus fgMergeFinallyChains(); PhaseStatus fgCloneFinally(); void fgCleanupContinuation(BasicBlock* continuation); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) PhaseStatus fgUpdateFinallyTargetFlags(); void fgClearAllFinallyTargetBits(); void fgAddFinallyTargetFlags(); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) PhaseStatus fgTailMergeThrows(); void fgTailMergeThrowsFallThroughHelper(BasicBlock* predBlock, BasicBlock* nonCanonicalBlock, BasicBlock* canonicalBlock, flowList* predEdge); void fgTailMergeThrowsJumpToHelper(BasicBlock* predBlock, BasicBlock* nonCanonicalBlock, BasicBlock* canonicalBlock, flowList* predEdge); GenTree* fgCheckCallArgUpdate(GenTree* parent, GenTree* child, var_types origType); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Sometimes we need to defer updating the BBF_FINALLY_TARGET bit. fgNeedToAddFinallyTargetBits signals // when this is necessary.
bool fgNeedToAddFinallyTargetBits; #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) bool fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block, BasicBlock* handler, BlockToBlockMap& continuationMap); GenTree* fgGetCritSectOfStaticMethod(); #if defined(FEATURE_EH_FUNCLETS) void fgAddSyncMethodEnterExit(); GenTree* fgCreateMonitorTree(unsigned lvaMonitorBool, unsigned lvaThisVar, BasicBlock* block, bool enter); void fgConvertSyncReturnToLeave(BasicBlock* block); #endif // FEATURE_EH_FUNCLETS void fgAddReversePInvokeEnterExit(); bool fgMoreThanOneReturnBlock(); // The number of separate return points in the method. unsigned fgReturnCount; void fgAddInternal(); enum class FoldResult { FOLD_DID_NOTHING, FOLD_CHANGED_CONTROL_FLOW, FOLD_REMOVED_LAST_STMT, FOLD_ALTERED_LAST_STMT, }; FoldResult fgFoldConditional(BasicBlock* block); void fgMorphStmts(BasicBlock* block); void fgMorphBlocks(); void fgMergeBlockReturn(BasicBlock* block); bool fgMorphBlockStmt(BasicBlock* block, Statement* stmt DEBUGARG(const char* msg)); void fgSetOptions(); #ifdef DEBUG static fgWalkPreFn fgAssertNoQmark; void fgPreExpandQmarkChecks(GenTree* expr); void fgPostExpandQmarkChecks(); static void fgCheckQmarkAllowedForm(GenTree* tree); #endif IL_OFFSET fgFindBlockILOffset(BasicBlock* block); void fgFixEntryFlowForOSR(); BasicBlock* fgSplitBlockAtBeginning(BasicBlock* curr); BasicBlock* fgSplitBlockAtEnd(BasicBlock* curr); BasicBlock* fgSplitBlockAfterStatement(BasicBlock* curr, Statement* stmt); BasicBlock* fgSplitBlockAfterNode(BasicBlock* curr, GenTree* node); // for LIR BasicBlock* fgSplitEdge(BasicBlock* curr, BasicBlock* succ); Statement* fgNewStmtFromTree(GenTree* tree, BasicBlock* block, const DebugInfo& di); Statement* fgNewStmtFromTree(GenTree* tree); Statement* fgNewStmtFromTree(GenTree* tree, BasicBlock* block); Statement* fgNewStmtFromTree(GenTree* tree, const DebugInfo& di); GenTree* fgGetTopLevelQmark(GenTree* expr, GenTree** ppDst = nullptr); void fgExpandQmarkForCastInstOf(BasicBlock* block, Statement* stmt); void fgExpandQmarkStmt(BasicBlock* block, Statement* stmt); void fgExpandQmarkNodes(); // Do "simple lowering." This functionality is (conceptually) part of "general" // lowering that is distributed between fgMorph and the lowering phase of LSRA. 
void fgSimpleLowering(); GenTree* fgInitThisClass(); GenTreeCall* fgGetStaticsCCtorHelper(CORINFO_CLASS_HANDLE cls, CorInfoHelpFunc helper); GenTreeCall* fgGetSharedCCtor(CORINFO_CLASS_HANDLE cls); bool backendRequiresLocalVarLifetimes() { return !opts.MinOpts() || m_pLinearScan->willEnregisterLocalVars(); } void fgLocalVarLiveness(); void fgLocalVarLivenessInit(); void fgPerNodeLocalVarLiveness(GenTree* node); void fgPerBlockLocalVarLiveness(); VARSET_VALRET_TP fgGetHandlerLiveVars(BasicBlock* block); void fgLiveVarAnalysis(bool updateInternalOnly = false); void fgComputeLifeCall(VARSET_TP& life, GenTreeCall* call); void fgComputeLifeTrackedLocalUse(VARSET_TP& life, LclVarDsc& varDsc, GenTreeLclVarCommon* node); bool fgComputeLifeTrackedLocalDef(VARSET_TP& life, VARSET_VALARG_TP keepAliveVars, LclVarDsc& varDsc, GenTreeLclVarCommon* node); bool fgComputeLifeUntrackedLocal(VARSET_TP& life, VARSET_VALARG_TP keepAliveVars, LclVarDsc& varDsc, GenTreeLclVarCommon* lclVarNode); bool fgComputeLifeLocal(VARSET_TP& life, VARSET_VALARG_TP keepAliveVars, GenTree* lclVarNode); void fgComputeLife(VARSET_TP& life, GenTree* startNode, GenTree* endNode, VARSET_VALARG_TP volatileVars, bool* pStmtInfoDirty DEBUGARG(bool* treeModf)); void fgComputeLifeLIR(VARSET_TP& life, BasicBlock* block, VARSET_VALARG_TP volatileVars); bool fgTryRemoveNonLocal(GenTree* node, LIR::Range* blockRange); void fgRemoveDeadStoreLIR(GenTree* store, BasicBlock* block); bool fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_VALARG_TP life, bool* doAgain, bool* pStmtInfoDirty, bool* pStoreRemoved DEBUGARG(bool* treeModf)); void fgInterBlockLocalVarLiveness(); // Blocks: convenience methods for enabling range-based `for` iteration over the function's blocks, e.g.: // 1. for (BasicBlock* const block : compiler->Blocks()) ... // 2. for (BasicBlock* const block : compiler->Blocks(startBlock)) ... // 3. for (BasicBlock* const block : compiler->Blocks(startBlock, endBlock)) ... // In case (1), the block list can be empty. In case (2), `startBlock` can be nullptr. In case (3), // both `startBlock` and `endBlock` must be non-null. // BasicBlockSimpleList Blocks() const { return BasicBlockSimpleList(fgFirstBB); } BasicBlockSimpleList Blocks(BasicBlock* startBlock) const { return BasicBlockSimpleList(startBlock); } BasicBlockRangeList Blocks(BasicBlock* startBlock, BasicBlock* endBlock) const { return BasicBlockRangeList(startBlock, endBlock); } // The presence of a partial definition presents some difficulties for SSA: this is both a use of some SSA name // of "x", and a def of a new SSA name for "x". The tree only has one local variable for "x", so it has to choose // whether to treat that as the use or def. It chooses the "use", and thus the old SSA name. This map allows us // to record/recover the "def" SSA number, given the lcl var node for "x" in such a tree. typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, unsigned> NodeToUnsignedMap; NodeToUnsignedMap* m_opAsgnVarDefSsaNums; NodeToUnsignedMap* GetOpAsgnVarDefSsaNums() { if (m_opAsgnVarDefSsaNums == nullptr) { m_opAsgnVarDefSsaNums = new (getAllocator()) NodeToUnsignedMap(getAllocator()); } return m_opAsgnVarDefSsaNums; } // This map tracks nodes whose value numbers explicitly or implicitly depend on memory states. // The map provides the entry block of the most closely enclosing loop that // defines the memory region accessed when defining the nodes's VN. 
// // This information should be consulted when considering hoisting node out of a loop, as the VN // for the node will only be valid within the indicated loop. // // It is not fine-grained enough to track memory dependence within loops, so cannot be used // for more general code motion. // // If a node does not have an entry in the map we currently assume the VN is not memory dependent // and so memory does not constrain hoisting. // typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, BasicBlock*> NodeToLoopMemoryBlockMap; NodeToLoopMemoryBlockMap* m_nodeToLoopMemoryBlockMap; NodeToLoopMemoryBlockMap* GetNodeToLoopMemoryBlockMap() { if (m_nodeToLoopMemoryBlockMap == nullptr) { m_nodeToLoopMemoryBlockMap = new (getAllocator()) NodeToLoopMemoryBlockMap(getAllocator()); } return m_nodeToLoopMemoryBlockMap; } void optRecordLoopMemoryDependence(GenTree* tree, BasicBlock* block, ValueNum memoryVN); void optCopyLoopMemoryDependence(GenTree* fromTree, GenTree* toTree); // Requires value numbering phase to have completed. Returns the value number ("gtVN") of the // "tree," EXCEPT in the case of GTF_VAR_USEASG, because the tree node's gtVN member is the // "use" VN. Performs a lookup into the map of (use asg tree -> def VN.) to return the "def's" // VN. inline ValueNum GetUseAsgDefVNOrTreeVN(GenTree* tree); // Requires that "lcl" has the GTF_VAR_DEF flag set. Returns the SSA number of "lcl". // Except: assumes that lcl is a def, and if it is // a partial def (GTF_VAR_USEASG), looks up and returns the SSA number for the "def", // rather than the "use" SSA number recorded in the tree "lcl". inline unsigned GetSsaNumForLocalVarDef(GenTree* lcl); inline bool PreciseRefCountsRequired(); // Performs SSA conversion. void fgSsaBuild(); // Reset any data structures to the state expected by "fgSsaBuild", so it can be run again. void fgResetForSsa(); unsigned fgSsaPassesCompleted; // Number of times fgSsaBuild has been run. // Returns "true" if this is a special variable that is never zero initialized in the prolog. inline bool fgVarIsNeverZeroInitializedInProlog(unsigned varNum); // Returns "true" if the variable needs explicit zero initialization. inline bool fgVarNeedsExplicitZeroInit(unsigned varNum, bool bbInALoop, bool bbIsReturn); // The value numbers for this compilation. ValueNumStore* vnStore; public: ValueNumStore* GetValueNumStore() { return vnStore; } // Do value numbering (assign a value number to each // tree node). void fgValueNumber(); // Computes new GcHeap VN via the assignment H[elemTypeEq][arrVN][inx][fldSeq] = rhsVN. // Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type. // The 'indType' is the indirection type of the lhs of the assignment and will typically // match the element type of the array or fldSeq. When this type doesn't match // or if the fldSeq is 'NotAField' we invalidate the array contents H[elemTypeEq][arrVN] // ValueNum fgValueNumberArrIndexAssign(CORINFO_CLASS_HANDLE elemTypeEq, ValueNum arrVN, ValueNum inxVN, FieldSeqNode* fldSeq, ValueNum rhsVN, var_types indType); // Requires that "tree" is a GT_IND marked as an array index, and that its address argument // has been parsed to yield the other input arguments. If evaluation of the address // can raise exceptions, those should be captured in the exception set "addrXvnp". // Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type. // Marks "tree" with the VN for H[elemTypeEq][arrVN][inx][fldSeq] (for the liberal VN; a new unique // VN for the conservative VN.) 
Also marks the tree's argument as the address of an array element. // The type tree->TypeGet() will typically match the element type of the array or fldSeq. // When this type doesn't match or if the fldSeq is 'NotAField' we return a new unique VN // ValueNum fgValueNumberArrIndexVal(GenTree* tree, CORINFO_CLASS_HANDLE elemTypeEq, ValueNum arrVN, ValueNum inxVN, ValueNumPair addrXvnp, FieldSeqNode* fldSeq); // Requires "funcApp" to be a VNF_PtrToArrElem, and "addrXvnp" to represent the exception set thrown // by evaluating the array index expression "tree". Returns the value number resulting from // dereferencing the array in the current GcHeap state. If "tree" is non-null, it must be the // "GT_IND" that does the dereference, and it is given the returned value number. ValueNum fgValueNumberArrIndexVal(GenTree* tree, VNFuncApp* funcApp, ValueNumPair addrXvnp); // Compute the value number for a byref-exposed load of the given type via the given pointerVN. ValueNum fgValueNumberByrefExposedLoad(var_types type, ValueNum pointerVN); unsigned fgVNPassesCompleted; // Number of times fgValueNumber has been run. // Utility functions for fgValueNumber. // Perform value-numbering for the trees in "blk". void fgValueNumberBlock(BasicBlock* blk); // Requires that "entryBlock" is the entry block of loop "loopNum", and that "loopNum" is the // innermost loop of which "entryBlock" is the entry. Returns the value number that should be // assumed for the memoryKind at the start "entryBlk". ValueNum fgMemoryVNForLoopSideEffects(MemoryKind memoryKind, BasicBlock* entryBlock, unsigned loopNum); // Called when an operation (performed by "tree", described by "msg") may cause the GcHeap to be mutated. // As GcHeap is a subset of ByrefExposed, this will also annotate the ByrefExposed mutation. void fgMutateGcHeap(GenTree* tree DEBUGARG(const char* msg)); // Called when an operation (performed by "tree", described by "msg") may cause an address-exposed local to be // mutated. void fgMutateAddressExposedLocal(GenTree* tree DEBUGARG(const char* msg)); // For a GC heap store at curTree, record the new curMemoryVN's and update curTree's MemorySsaMap. // As GcHeap is a subset of ByrefExposed, this will also record the ByrefExposed store. void recordGcHeapStore(GenTree* curTree, ValueNum gcHeapVN DEBUGARG(const char* msg)); // For a store to an address-exposed local at curTree, record the new curMemoryVN and update curTree's MemorySsaMap. void recordAddressExposedLocalStore(GenTree* curTree, ValueNum memoryVN DEBUGARG(const char* msg)); void fgSetCurrentMemoryVN(MemoryKind memoryKind, ValueNum newMemoryVN); // Tree caused an update in the current memory VN. If "tree" has an associated heap SSA #, record that // value in that SSA #. void fgValueNumberRecordMemorySsa(MemoryKind memoryKind, GenTree* tree); // The input 'tree' is a leaf node that is a constant // Assign the proper value number to the tree void fgValueNumberTreeConst(GenTree* tree); // If the VN store has been initialized, reassign the // proper value number to the constant tree. void fgUpdateConstTreeValueNumber(GenTree* tree); // Assumes that all inputs to "tree" have had value numbers assigned; assigns a VN to tree. // (With some exceptions: the VN of the lhs of an assignment is assigned as part of the // assignment.) void fgValueNumberTree(GenTree* tree); void fgValueNumberAssignment(GenTreeOp* tree); // Does value-numbering for a block assignment. 
void fgValueNumberBlockAssignment(GenTree* tree); bool fgValueNumberBlockAssignmentTypeCheck(LclVarDsc* dstVarDsc, FieldSeqNode* dstFldSeq, GenTree* src); // Does value-numbering for a cast tree. void fgValueNumberCastTree(GenTree* tree); // Does value-numbering for an intrinsic tree. void fgValueNumberIntrinsic(GenTree* tree); #ifdef FEATURE_SIMD // Does value-numbering for a GT_SIMD tree void fgValueNumberSimd(GenTreeSIMD* tree); #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS // Does value-numbering for a GT_HWINTRINSIC tree void fgValueNumberHWIntrinsic(GenTreeHWIntrinsic* tree); #endif // FEATURE_HW_INTRINSICS // Does value-numbering for a call. We interpret some helper calls. void fgValueNumberCall(GenTreeCall* call); // Does value-numbering for a helper representing a cast operation. void fgValueNumberCastHelper(GenTreeCall* call); // Does value-numbering for a helper "call" that has a VN function symbol "vnf". void fgValueNumberHelperCallFunc(GenTreeCall* call, VNFunc vnf, ValueNumPair vnpExc); // Requires "helpCall" to be a helper call. Assigns it a value number; // we understand the semantics of some of the calls. Returns "true" if // the call may modify the heap (we assume arbitrary memory side effects if so). bool fgValueNumberHelperCall(GenTreeCall* helpCall); // Requires that "helpFunc" is one of the pure Jit Helper methods. // Returns the corresponding VNFunc to use for value numbering VNFunc fgValueNumberJitHelperMethodVNFunc(CorInfoHelpFunc helpFunc); // Adds the exception set for the current tree node which has a memory indirection operation void fgValueNumberAddExceptionSetForIndirection(GenTree* tree, GenTree* baseAddr); // Adds the exception sets for the current tree node which is performing a division or modulus operation void fgValueNumberAddExceptionSetForDivision(GenTree* tree); // Adds the exception set for the current tree node which is performing a overflow checking operation void fgValueNumberAddExceptionSetForOverflow(GenTree* tree); // Adds the exception set for the current tree node which is performing a bounds check operation void fgValueNumberAddExceptionSetForBoundsCheck(GenTree* tree); // Adds the exception set for the current tree node which is performing a ckfinite operation void fgValueNumberAddExceptionSetForCkFinite(GenTree* tree); // Adds the exception sets for the current tree node void fgValueNumberAddExceptionSet(GenTree* tree); #ifdef DEBUG void fgDebugCheckExceptionSets(); void fgDebugCheckValueNumberedTree(GenTree* tree); #endif // These are the current value number for the memory implicit variables while // doing value numbering. These are the value numbers under the "liberal" interpretation // of memory values; the "conservative" interpretation needs no VN, since every access of // memory yields an unknown value. ValueNum fgCurMemoryVN[MemoryKindCount]; // Return a "pseudo"-class handle for an array element type. If "elemType" is TYP_STRUCT, // requires "elemStructType" to be non-null (and to have a low-order zero). Otherwise, low order bit // is 1, and the rest is an encoding of "elemTyp". static CORINFO_CLASS_HANDLE EncodeElemType(var_types elemTyp, CORINFO_CLASS_HANDLE elemStructType) { if (elemStructType != nullptr) { assert(varTypeIsStruct(elemTyp) || elemTyp == TYP_REF || elemTyp == TYP_BYREF || varTypeIsIntegral(elemTyp)); assert((size_t(elemStructType) & 0x1) == 0x0); // Make sure the encoding below is valid. 
return elemStructType;
        }
        else
        {
            assert(elemTyp != TYP_STRUCT);
            elemTyp = varTypeToSigned(elemTyp);
            return CORINFO_CLASS_HANDLE(size_t(elemTyp) << 1 | 0x1);
        }
    }

    // If "clsHnd" was produced by EncodeElemType() from a primitive type (low-order bit set), returns the
    // var_types it represents. Otherwise, returns TYP_STRUCT (on the assumption that "clsHnd" is
    // the struct type of the element).
    static var_types DecodeElemType(CORINFO_CLASS_HANDLE clsHnd)
    {
        size_t clsHndVal = size_t(clsHnd);
        if (clsHndVal & 0x1)
        {
            return var_types(clsHndVal >> 1);
        }
        else
        {
            return TYP_STRUCT;
        }
    }

    // Convert a BYTE which represents the VM's CorInfoGCtype to the JIT's var_types
    var_types getJitGCType(BYTE gcType);

    // Returns true if the provided type should be treated as a primitive type
    // for the unmanaged calling conventions.
    bool isNativePrimitiveStructType(CORINFO_CLASS_HANDLE clsHnd);

    enum structPassingKind
    {
        SPK_Unknown,       // Invalid value, never returned
        SPK_PrimitiveType, // The struct is passed/returned using a primitive type.
        SPK_EnclosingType, // Like SPK_PrimitiveType, but used for return types that
                           // require a primitive type temp that is larger than the struct size.
                           // Currently used for structs of size 3, 5, 6, or 7 bytes.
        SPK_ByValue,       // The struct is passed/returned by value (using the ABI rules)
                           // for ARM64 and UNIX_X64 in multiple registers. (when all of the
                           // parameter registers are used, then the stack will be used)
                           // for X86 passed on the stack, for ARM32 passed in registers
                           // or the stack or split between registers and the stack.
        SPK_ByValueAsHfa,  // The struct is passed/returned as an HFA in multiple registers.
        SPK_ByReference
    }; // The struct is passed/returned by reference to a copy/buffer.

    // Get the "primitive" type that is used when we are given a struct of size 'structSize'.
    // For pointer sized structs the 'clsHnd' is used to determine if the struct contains GC ref.
    // A "primitive" type is one of the scalar types: byte, short, int, long, ref, float, double
    // If we can't or shouldn't use a "primitive" type then TYP_UNKNOWN is returned.
    //
    // isVarArg is passed for use on Windows Arm64 to change the decision returned regarding
    // hfa types.
    //
    var_types getPrimitiveTypeForStruct(unsigned structSize, CORINFO_CLASS_HANDLE clsHnd, bool isVarArg);

    // Get the type that is used to pass values of the given struct type.
    // isVarArg is passed for use on Windows Arm64 to change the decision returned regarding
    // hfa types.
    //
    var_types getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
                                  structPassingKind*   wbPassStruct,
                                  bool                 isVarArg,
                                  unsigned             structSize);

    // Get the type that is used to return values of the given struct type.
    // If the size is unknown, pass 0 and it will be determined from 'clsHnd'.
    var_types getReturnTypeForStruct(CORINFO_CLASS_HANDLE     clsHnd,
                                     CorInfoCallConvExtension callConv,
                                     structPassingKind*       wbPassStruct = nullptr,
                                     unsigned                 structSize   = 0);

#ifdef DEBUG
    // Print a representation of "vnp" or "vn" on standard output.
    // If "level" is non-zero, we also print out a partial expansion of the value.
    void vnpPrint(ValueNumPair vnp, unsigned level);
    void vnPrint(ValueNum vn, unsigned level);
#endif

    bool fgDominate(BasicBlock* b1, BasicBlock* b2); // Return true if b1 dominates b2

    // Dominator computation member functions
    // Not exposed outside Compiler
protected:
    bool fgReachable(BasicBlock* b1, BasicBlock* b2); // Returns true if block b1 can reach block b2

    // Compute immediate dominators, the dominator tree and its pre/post-order traversal numbers.
void fgComputeDoms(); void fgCompDominatedByExceptionalEntryBlocks(); BlockSet_ValRet_T fgGetDominatorSet(BasicBlock* block); // Returns a set of blocks that dominate the given block. // Note: this is relatively slow compared to calling fgDominate(), // especially if dealing with a single block versus block check. void fgComputeReachabilitySets(); // Compute bbReach sets. (Also sets BBF_GC_SAFE_POINT flag on blocks.) void fgComputeReturnBlocks(); // Initialize fgReturnBlocks to a list of BBJ_RETURN blocks. void fgComputeEnterBlocksSet(); // Compute the set of entry blocks, 'fgEnterBlks'. bool fgRemoveUnreachableBlocks(); // Remove blocks determined to be unreachable by the bbReach sets. void fgComputeReachability(); // Perform flow graph node reachability analysis. BasicBlock* fgIntersectDom(BasicBlock* a, BasicBlock* b); // Intersect two immediate dominator sets. void fgDfsInvPostOrder(); // In order to compute dominance using fgIntersectDom, the flow graph nodes must be // processed in topological sort, this function takes care of that. void fgDfsInvPostOrderHelper(BasicBlock* block, BlockSet& visited, unsigned* count); BlockSet_ValRet_T fgDomFindStartNodes(); // Computes which basic blocks don't have incoming edges in the flow graph. // Returns this as a set. INDEBUG(void fgDispDomTree(DomTreeNode* domTree);) // Helper that prints out the Dominator Tree in debug builds. DomTreeNode* fgBuildDomTree(); // Once we compute all the immediate dominator sets for each node in the flow graph // (performed by fgComputeDoms), this procedure builds the dominance tree represented // adjacency lists. // In order to speed up the queries of the form 'Does A dominates B', we can perform a DFS preorder and postorder // traversal of the dominance tree and the dominance query will become A dominates B iif preOrder(A) <= preOrder(B) // && postOrder(A) >= postOrder(B) making the computation O(1). void fgNumberDomTree(DomTreeNode* domTree); // When the flow graph changes, we need to update the block numbers, predecessor lists, reachability sets, // dominators, and possibly loops. void fgUpdateChangedFlowGraph(const bool computePreds = true, const bool computeDoms = true, const bool computeReturnBlocks = false, const bool computeLoops = false); public: // Compute the predecessors of the blocks in the control flow graph. void fgComputePreds(); // Remove all predecessor information. void fgRemovePreds(); // Compute the cheap flow graph predecessors lists. This is used in some early phases // before the full predecessors lists are computed. void fgComputeCheapPreds(); private: void fgAddCheapPred(BasicBlock* block, BasicBlock* blockPred); void fgRemoveCheapPred(BasicBlock* block, BasicBlock* blockPred); public: enum GCPollType { GCPOLL_NONE, GCPOLL_CALL, GCPOLL_INLINE }; // Initialize the per-block variable sets (used for liveness analysis). void fgInitBlockVarSets(); PhaseStatus fgInsertGCPolls(); BasicBlock* fgCreateGCPoll(GCPollType pollType, BasicBlock* block); // Requires that "block" is a block that returns from // a finally. Returns the number of successors (jump targets of // of blocks in the covered "try" that did a "LEAVE".) unsigned fgNSuccsOfFinallyRet(BasicBlock* block); // Requires that "block" is a block that returns (in the sense of BBJ_EHFINALLYRET) from // a finally. Returns its "i"th successor (jump targets of // of blocks in the covered "try" that did a "LEAVE".) // Requires that "i" < fgNSuccsOfFinallyRet(block). 
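    // For illustration, a caller would typically enumerate these successors as (sketch only):
    //     for (unsigned i = 0; i < fgNSuccsOfFinallyRet(block); i++)
    //     {
    //         BasicBlock* const succ = fgSuccOfFinallyRet(block, i);
    //         // ... process succ ...
    //     }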
BasicBlock* fgSuccOfFinallyRet(BasicBlock* block, unsigned i); private: // Factor out common portions of the impls of the methods above. void fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock** bres, unsigned* nres); public: // For many purposes, it is desirable to be able to enumerate the *distinct* targets of a switch statement, // skipping duplicate targets. (E.g., in flow analyses that are only interested in the set of possible targets.) // SwitchUniqueSuccSet contains the non-duplicated switch targets. // (Code that modifies the jump table of a switch has an obligation to call Compiler::UpdateSwitchTableTarget, // which in turn will call the "UpdateTarget" method of this type if a SwitchUniqueSuccSet has already // been computed for the switch block. If a switch block is deleted or is transformed into a non-switch, // we leave the entry associated with the block, but it will no longer be accessed.) struct SwitchUniqueSuccSet { unsigned numDistinctSuccs; // Number of distinct targets of the switch. BasicBlock** nonDuplicates; // Array of "numDistinctSuccs", containing all the distinct switch target // successors. // The switch block "switchBlk" just had an entry with value "from" modified to the value "to". // Update "this" as necessary: if "from" is no longer an element of the jump table of "switchBlk", // remove it from "this", and ensure that "to" is a member. Use "alloc" to do any required allocation. void UpdateTarget(CompAllocator alloc, BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to); }; typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, SwitchUniqueSuccSet> BlockToSwitchDescMap; private: // Maps BasicBlock*'s that end in switch statements to SwitchUniqueSuccSets that allow // iteration over only the distinct successors. BlockToSwitchDescMap* m_switchDescMap; public: BlockToSwitchDescMap* GetSwitchDescMap(bool createIfNull = true) { if ((m_switchDescMap == nullptr) && createIfNull) { m_switchDescMap = new (getAllocator()) BlockToSwitchDescMap(getAllocator()); } return m_switchDescMap; } // Invalidate the map of unique switch block successors. For example, since the hash key of the map // depends on block numbers, we must invalidate the map when the blocks are renumbered, to ensure that // we don't accidentally look up and return the wrong switch data. void InvalidateUniqueSwitchSuccMap() { m_switchDescMap = nullptr; } // Requires "switchBlock" to be a block that ends in a switch. Returns // the corresponding SwitchUniqueSuccSet. SwitchUniqueSuccSet GetDescriptorForSwitch(BasicBlock* switchBlk); // The switch block "switchBlk" just had an entry with value "from" modified to the value "to". // Update "this" as necessary: if "from" is no longer an element of the jump table of "switchBlk", // remove it from "this", and ensure that "to" is a member. void UpdateSwitchTableTarget(BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to); // Remove the "SwitchUniqueSuccSet" of "switchBlk" in the BlockToSwitchDescMap. 
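    // (For illustration, a typical consumer of the unique-successor data does something like this sketch:
    //     SwitchUniqueSuccSet sd = GetDescriptorForSwitch(switchBlk);
    //     for (unsigned i = 0; i < sd.numDistinctSuccs; i++)
    //     {
    //         // ... visit sd.nonDuplicates[i] ...
    //     }
    // )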
void fgInvalidateSwitchDescMapEntry(BasicBlock* switchBlk); BasicBlock* fgFirstBlockOfHandler(BasicBlock* block); bool fgIsFirstBlockOfFilterOrHandler(BasicBlock* block); flowList* fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred); flowList* fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred, flowList*** ptrToPred); flowList* fgRemoveRefPred(BasicBlock* block, BasicBlock* blockPred); flowList* fgRemoveAllRefPreds(BasicBlock* block, BasicBlock* blockPred); void fgRemoveBlockAsPred(BasicBlock* block); void fgChangeSwitchBlock(BasicBlock* oldSwitchBlock, BasicBlock* newSwitchBlock); void fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch, BasicBlock* newTarget, BasicBlock* oldTarget); void fgReplaceJumpTarget(BasicBlock* block, BasicBlock* newTarget, BasicBlock* oldTarget); void fgReplacePred(BasicBlock* block, BasicBlock* oldPred, BasicBlock* newPred); flowList* fgAddRefPred(BasicBlock* block, BasicBlock* blockPred, flowList* oldEdge = nullptr, bool initializingPreds = false); // Only set to 'true' when we are computing preds in // fgComputePreds() void fgFindBasicBlocks(); bool fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt); bool fgCheckEHCanInsertAfterBlock(BasicBlock* blk, unsigned regionIndex, bool putInTryRegion); BasicBlock* fgFindInsertPoint(unsigned regionIndex, bool putInTryRegion, BasicBlock* startBlk, BasicBlock* endBlk, BasicBlock* nearBlk, BasicBlock* jumpBlk, bool runRarely); unsigned fgGetNestingLevel(BasicBlock* block, unsigned* pFinallyNesting = nullptr); void fgPostImportationCleanup(); void fgRemoveStmt(BasicBlock* block, Statement* stmt DEBUGARG(bool isUnlink = false)); void fgUnlinkStmt(BasicBlock* block, Statement* stmt); bool fgCheckRemoveStmt(BasicBlock* block, Statement* stmt); void fgCreateLoopPreHeader(unsigned lnum); void fgUnreachableBlock(BasicBlock* block); void fgRemoveConditionalJump(BasicBlock* block); BasicBlock* fgLastBBInMainFunction(); BasicBlock* fgEndBBAfterMainFunction(); void fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd); void fgRemoveBlock(BasicBlock* block, bool unreachable); bool fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext); void fgCompactBlocks(BasicBlock* block, BasicBlock* bNext); void fgUpdateLoopsAfterCompacting(BasicBlock* block, BasicBlock* bNext); BasicBlock* fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst); bool fgRenumberBlocks(); bool fgExpandRarelyRunBlocks(); bool fgEhAllowsMoveBlock(BasicBlock* bBefore, BasicBlock* bAfter); void fgMoveBlocksAfter(BasicBlock* bStart, BasicBlock* bEnd, BasicBlock* insertAfterBlk); enum FG_RELOCATE_TYPE { FG_RELOCATE_TRY, // relocate the 'try' region FG_RELOCATE_HANDLER // relocate the handler region (including the filter if necessary) }; BasicBlock* fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE relocateType); #if defined(FEATURE_EH_FUNCLETS) #if defined(TARGET_ARM) void fgClearFinallyTargetBit(BasicBlock* block); #endif // defined(TARGET_ARM) bool fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block); bool fgAnyIntraHandlerPreds(BasicBlock* block); void fgInsertFuncletPrologBlock(BasicBlock* block); void fgCreateFuncletPrologBlocks(); void fgCreateFunclets(); #else // !FEATURE_EH_FUNCLETS bool fgRelocateEHRegions(); #endif // !FEATURE_EH_FUNCLETS bool fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* target); bool fgBlockEndFavorsTailDuplication(BasicBlock* block, unsigned lclNum); bool fgBlockIsGoodTailDuplicationCandidate(BasicBlock* block, unsigned* lclNum); bool fgOptimizeEmptyBlock(BasicBlock* block); bool 
fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBlock* bDest); bool fgOptimizeBranch(BasicBlock* bJump); bool fgOptimizeSwitchBranches(BasicBlock* block); bool fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, BasicBlock* bPrev); bool fgOptimizeSwitchJumps(); #ifdef DEBUG void fgPrintEdgeWeights(); #endif void fgComputeBlockAndEdgeWeights(); weight_t fgComputeMissingBlockWeights(); void fgComputeCalledCount(weight_t returnWeight); void fgComputeEdgeWeights(); bool fgReorderBlocks(); PhaseStatus fgDetermineFirstColdBlock(); bool fgIsForwardBranch(BasicBlock* bJump, BasicBlock* bSrc = nullptr); bool fgUpdateFlowGraph(bool doTailDup = false); void fgFindOperOrder(); // method that returns if you should split here typedef bool(fgSplitPredicate)(GenTree* tree, GenTree* parent, fgWalkData* data); void fgSetBlockOrder(); void fgRemoveReturnBlock(BasicBlock* block); /* Helper code that has been factored out */ inline void fgConvertBBToThrowBB(BasicBlock* block); bool fgCastNeeded(GenTree* tree, var_types toType); GenTree* fgDoNormalizeOnStore(GenTree* tree); GenTree* fgMakeTmpArgNode(fgArgTabEntry* curArgTabEntry); // The following check for loops that don't execute calls bool fgLoopCallMarked; void fgLoopCallTest(BasicBlock* srcBB, BasicBlock* dstBB); void fgLoopCallMark(); void fgMarkLoopHead(BasicBlock* block); unsigned fgGetCodeEstimate(BasicBlock* block); #if DUMP_FLOWGRAPHS enum class PhasePosition { PrePhase, PostPhase }; const char* fgProcessEscapes(const char* nameIn, escapeMapping_t* map); static void fgDumpTree(FILE* fgxFile, GenTree* const tree); FILE* fgOpenFlowGraphFile(bool* wbDontClose, Phases phase, PhasePosition pos, LPCWSTR type); bool fgDumpFlowGraph(Phases phase, PhasePosition pos); #endif // DUMP_FLOWGRAPHS #ifdef DEBUG void fgDispDoms(); void fgDispReach(); void fgDispBBLiveness(BasicBlock* block); void fgDispBBLiveness(); void fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth = 0); void fgDispBasicBlocks(BasicBlock* firstBlock, BasicBlock* lastBlock, bool dumpTrees); void fgDispBasicBlocks(bool dumpTrees = false); void fgDumpStmtTree(Statement* stmt, unsigned bbNum); void fgDumpBlock(BasicBlock* block); void fgDumpTrees(BasicBlock* firstBlock, BasicBlock* lastBlock); static fgWalkPreFn fgStress64RsltMulCB; void fgStress64RsltMul(); void fgDebugCheckUpdate(); void fgDebugCheckBBNumIncreasing(); void fgDebugCheckBBlist(bool checkBBNum = false, bool checkBBRefs = true); void fgDebugCheckBlockLinks(); void fgDebugCheckLinks(bool morphTrees = false); void fgDebugCheckStmtsList(BasicBlock* block, bool morphTrees); void fgDebugCheckNodeLinks(BasicBlock* block, Statement* stmt); void fgDebugCheckNodesUniqueness(); void fgDebugCheckLoopTable(); void fgDebugCheckFlags(GenTree* tree); void fgDebugCheckDispFlags(GenTree* tree, GenTreeFlags dispFlags, GenTreeDebugFlags debugFlags); void fgDebugCheckFlagsHelper(GenTree* tree, GenTreeFlags actualFlags, GenTreeFlags expectedFlags); void fgDebugCheckTryFinallyExits(); void fgDebugCheckProfileData(); bool fgDebugCheckIncomingProfileData(BasicBlock* block); bool fgDebugCheckOutgoingProfileData(BasicBlock* block); #endif // DEBUG static bool fgProfileWeightsEqual(weight_t weight1, weight_t weight2); static bool fgProfileWeightsConsistent(weight_t weight1, weight_t weight2); static GenTree* fgGetFirstNode(GenTree* tree); //--------------------- Walking the trees in the IR ----------------------- struct fgWalkData { Compiler* compiler; fgWalkPreFn* wtprVisitorFn; fgWalkPostFn* wtpoVisitorFn; void* pCallbackData; // 
user-provided data GenTree* parent; // parent of current node, provided to callback GenTreeStack* parentStack; // stack of parent nodes, if asked for bool wtprLclsOnly; // whether to only visit lclvar nodes #ifdef DEBUG bool printModified; // callback can use this #endif }; fgWalkResult fgWalkTreePre(GenTree** pTree, fgWalkPreFn* visitor, void* pCallBackData = nullptr, bool lclVarsOnly = false, bool computeStack = false); fgWalkResult fgWalkTree(GenTree** pTree, fgWalkPreFn* preVisitor, fgWalkPostFn* postVisitor, void* pCallBackData = nullptr); void fgWalkAllTreesPre(fgWalkPreFn* visitor, void* pCallBackData); //----- Postorder fgWalkResult fgWalkTreePost(GenTree** pTree, fgWalkPostFn* visitor, void* pCallBackData = nullptr, bool computeStack = false); // An fgWalkPreFn that looks for expressions that have inline throws in // minopts mode. Basically it looks for tress with gtOverflowEx() or // GTF_IND_RNGCHK. It returns WALK_ABORT if one is found. It // returns WALK_SKIP_SUBTREES if GTF_EXCEPT is not set (assumes flags // properly propagated to parent trees). It returns WALK_CONTINUE // otherwise. static fgWalkResult fgChkThrowCB(GenTree** pTree, Compiler::fgWalkData* data); static fgWalkResult fgChkLocAllocCB(GenTree** pTree, Compiler::fgWalkData* data); static fgWalkResult fgChkQmarkCB(GenTree** pTree, Compiler::fgWalkData* data); /************************************************************************** * PROTECTED *************************************************************************/ protected: friend class SsaBuilder; friend struct ValueNumberState; //--------------------- Detect the basic blocks --------------------------- BasicBlock** fgBBs; // Table of pointers to the BBs void fgInitBBLookup(); BasicBlock* fgLookupBB(unsigned addr); bool fgCanSwitchToOptimized(); void fgSwitchToOptimized(const char* reason); bool fgMayExplicitTailCall(); void fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget); void fgMarkBackwardJump(BasicBlock* startBlock, BasicBlock* endBlock); void fgLinkBasicBlocks(); unsigned fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget); void fgCheckBasicBlockControlFlow(); void fgControlFlowPermitted(BasicBlock* blkSrc, BasicBlock* blkDest, bool IsLeave = false /* is the src a leave block */); bool fgFlowToFirstBlockOfInnerTry(BasicBlock* blkSrc, BasicBlock* blkDest, bool sibling); void fgObserveInlineConstants(OPCODE opcode, const FgStack& stack, bool isInlining); void fgAdjustForAddressExposedOrWrittenThis(); unsigned fgStressBBProf() { #ifdef DEBUG unsigned result = JitConfig.JitStressBBProf(); if (result == 0) { if (compStressCompile(STRESS_BB_PROFILE, 15)) { result = 1; } } return result; #else return 0; #endif } bool fgHaveProfileData(); bool fgGetProfileWeightForBasicBlock(IL_OFFSET offset, weight_t* weight); Instrumentor* fgCountInstrumentor; Instrumentor* fgClassInstrumentor; PhaseStatus fgPrepareToInstrumentMethod(); PhaseStatus fgInstrumentMethod(); PhaseStatus fgIncorporateProfileData(); void fgIncorporateBlockCounts(); void fgIncorporateEdgeCounts(); CORINFO_CLASS_HANDLE getRandomClass(ICorJitInfo::PgoInstrumentationSchema* schema, UINT32 countSchemaItems, BYTE* pInstrumentationData, int32_t ilOffset, CLRRandom* random); public: const char* fgPgoFailReason; bool fgPgoDisabled; ICorJitInfo::PgoSource fgPgoSource; ICorJitInfo::PgoInstrumentationSchema* fgPgoSchema; BYTE* fgPgoData; UINT32 fgPgoSchemaCount; HRESULT fgPgoQueryResult; UINT32 fgNumProfileRuns; UINT32 fgPgoBlockCounts; 
UINT32 fgPgoEdgeCounts; UINT32 fgPgoClassProfiles; unsigned fgPgoInlineePgo; unsigned fgPgoInlineeNoPgo; unsigned fgPgoInlineeNoPgoSingleBlock; void WalkSpanningTree(SpanningTreeVisitor* visitor); void fgSetProfileWeight(BasicBlock* block, weight_t weight); void fgApplyProfileScale(); bool fgHaveSufficientProfileData(); bool fgHaveTrustedProfileData(); // fgIsUsingProfileWeights - returns true if we have real profile data for this method // or if we have some fake profile data for the stress mode bool fgIsUsingProfileWeights() { return (fgHaveProfileData() || fgStressBBProf()); } // fgProfileRunsCount - returns total number of scenario runs for the profile data // or BB_UNITY_WEIGHT_UNSIGNED when we aren't using profile data. unsigned fgProfileRunsCount() { return fgIsUsingProfileWeights() ? fgNumProfileRuns : BB_UNITY_WEIGHT_UNSIGNED; } //-------- Insert a statement at the start or end of a basic block -------- #ifdef DEBUG public: static bool fgBlockContainsStatementBounded(BasicBlock* block, Statement* stmt, bool answerOnBoundExceeded = true); #endif public: Statement* fgNewStmtAtBeg(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo()); void fgInsertStmtAtEnd(BasicBlock* block, Statement* stmt); Statement* fgNewStmtAtEnd(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo()); Statement* fgNewStmtNearEnd(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo()); private: void fgInsertStmtNearEnd(BasicBlock* block, Statement* stmt); void fgInsertStmtAtBeg(BasicBlock* block, Statement* stmt); void fgInsertStmtAfter(BasicBlock* block, Statement* insertionPoint, Statement* stmt); public: void fgInsertStmtBefore(BasicBlock* block, Statement* insertionPoint, Statement* stmt); private: Statement* fgInsertStmtListAfter(BasicBlock* block, Statement* stmtAfter, Statement* stmtList); // Create a new temporary variable to hold the result of *ppTree, // and transform the graph accordingly. GenTree* fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDLE structType = nullptr); GenTree* fgMakeMultiUse(GenTree** ppTree); private: // Recognize a bitwise rotation pattern and convert into a GT_ROL or a GT_ROR node. GenTree* fgRecognizeAndMorphBitwiseRotation(GenTree* tree); bool fgOperIsBitwiseRotationRoot(genTreeOps oper); #if !defined(TARGET_64BIT) // Recognize and morph a long multiplication with 32 bit operands. GenTreeOp* fgRecognizeAndMorphLongMul(GenTreeOp* mul); GenTreeOp* fgMorphLongMul(GenTreeOp* mul); #endif //-------- Determine the order in which the trees will be evaluated ------- unsigned fgTreeSeqNum; GenTree* fgTreeSeqLst; GenTree* fgTreeSeqBeg; GenTree* fgSetTreeSeq(GenTree* tree, GenTree* prev = nullptr, bool isLIR = false); void fgSetTreeSeqHelper(GenTree* tree, bool isLIR); void fgSetTreeSeqFinish(GenTree* tree, bool isLIR); void fgSetStmtSeq(Statement* stmt); void fgSetBlockOrder(BasicBlock* block); //------------------------- Morphing -------------------------------------- unsigned fgPtrArgCntMax; public: //------------------------------------------------------------------------ // fgGetPtrArgCntMax: Return the maximum number of pointer-sized stack arguments that calls inside this method // can push on the stack. This value is calculated during morph. // // Return Value: // Returns fgPtrArgCntMax, that is a private field. 
// unsigned fgGetPtrArgCntMax() const { return fgPtrArgCntMax; } //------------------------------------------------------------------------ // fgSetPtrArgCntMax: Set the maximum number of pointer-sized stack arguments that calls inside this method // can push on the stack. This function is used during StackLevelSetter to fix incorrect morph calculations. // void fgSetPtrArgCntMax(unsigned argCntMax) { fgPtrArgCntMax = argCntMax; } bool compCanEncodePtrArgCntMax(); private: hashBv* fgOutgoingArgTemps; hashBv* fgCurrentlyInUseArgTemps; void fgSetRngChkTarget(GenTree* tree, bool delay = true); BasicBlock* fgSetRngChkTargetInner(SpecialCodeKind kind, bool delay); #if REARRANGE_ADDS void fgMoveOpsLeft(GenTree* tree); #endif bool fgIsCommaThrow(GenTree* tree, bool forFolding = false); bool fgIsThrow(GenTree* tree); bool fgInDifferentRegions(BasicBlock* blk1, BasicBlock* blk2); bool fgIsBlockCold(BasicBlock* block); GenTree* fgMorphCastIntoHelper(GenTree* tree, int helper, GenTree* oper); GenTree* fgMorphIntoHelperCall(GenTree* tree, int helper, GenTreeCall::Use* args, bool morphArgs = true); GenTree* fgMorphStackArgForVarArgs(unsigned lclNum, var_types varType, unsigned lclOffs); // A "MorphAddrContext" carries information from the surrounding context. If we are evaluating a byref address, // it is useful to know whether the address will be immediately dereferenced, or whether the address value will // be used, perhaps by passing it as an argument to a called method. This affects how null checking is done: // for sufficiently small offsets, we can rely on OS page protection to implicitly null-check addresses that we // know will be dereferenced. To know that reliance on implicit null checking is sound, we must further know that // all offsets between the top-level indirection and the bottom are constant, and that their sum is sufficiently // small; hence the other fields of MorphAddrContext. enum MorphAddrContextKind { MACK_Ind, MACK_Addr, }; struct MorphAddrContext { MorphAddrContextKind m_kind; bool m_allConstantOffsets; // Valid only for "m_kind == MACK_Ind". True iff all offsets between // top-level indirection and here have been constants. size_t m_totalOffset; // Valid only for "m_kind == MACK_Ind", and if "m_allConstantOffsets" is true. // In that case, is the sum of those constant offsets. MorphAddrContext(MorphAddrContextKind kind) : m_kind(kind), m_allConstantOffsets(true), m_totalOffset(0) { } }; // A MACK_CopyBlock context is immutable, so we can just make one of these and share it. static MorphAddrContext s_CopyBlockMAC; #ifdef FEATURE_SIMD GenTree* getSIMDStructFromField(GenTree* tree, CorInfoType* simdBaseJitTypeOut, unsigned* indexOut, unsigned* simdSizeOut, bool ignoreUsedInSIMDIntrinsic = false); GenTree* fgMorphFieldAssignToSimdSetElement(GenTree* tree); GenTree* fgMorphFieldToSimdGetElement(GenTree* tree); bool fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement* stmt); void impMarkContiguousSIMDFieldAssignments(Statement* stmt); // fgPreviousCandidateSIMDFieldAsgStmt is only used for tracking previous simd field assignment // in function: Complier::impMarkContiguousSIMDFieldAssignments. 
Statement* fgPreviousCandidateSIMDFieldAsgStmt; #endif // FEATURE_SIMD GenTree* fgMorphArrayIndex(GenTree* tree); GenTree* fgMorphExpandCast(GenTreeCast* tree); GenTreeFieldList* fgMorphLclArgToFieldlist(GenTreeLclVarCommon* lcl); void fgInitArgInfo(GenTreeCall* call); GenTreeCall* fgMorphArgs(GenTreeCall* call); void fgMakeOutgoingStructArgCopy(GenTreeCall* call, GenTreeCall::Use* args, CORINFO_CLASS_HANDLE copyBlkClass); GenTree* fgMorphLocalVar(GenTree* tree, bool forceRemorph); public: bool fgAddrCouldBeNull(GenTree* addr); private: GenTree* fgMorphField(GenTree* tree, MorphAddrContext* mac); bool fgCanFastTailCall(GenTreeCall* call, const char** failReason); #if FEATURE_FASTTAILCALL bool fgCallHasMustCopyByrefParameter(GenTreeCall* callee); #endif bool fgCheckStmtAfterTailCall(); GenTree* fgMorphTailCallViaHelpers(GenTreeCall* call, CORINFO_TAILCALL_HELPERS& help); bool fgCanTailCallViaJitHelper(); void fgMorphTailCallViaJitHelper(GenTreeCall* call); GenTree* fgCreateCallDispatcherAndGetResult(GenTreeCall* origCall, CORINFO_METHOD_HANDLE callTargetStubHnd, CORINFO_METHOD_HANDLE dispatcherHnd); GenTree* getLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle); GenTree* getRuntimeLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle); GenTree* getVirtMethodPointerTree(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); GenTree* getTokenHandleTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool parent); GenTree* fgMorphPotentialTailCall(GenTreeCall* call); GenTree* fgGetStubAddrArg(GenTreeCall* call); unsigned fgGetArgTabEntryParameterLclNum(GenTreeCall* call, fgArgTabEntry* argTabEntry); void fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCall* recursiveTailCall); Statement* fgAssignRecursiveCallArgToCallerParam(GenTree* arg, fgArgTabEntry* argTabEntry, unsigned lclParamNum, BasicBlock* block, const DebugInfo& callDI, Statement* tmpAssignmentInsertionPoint, Statement* paramAssignmentInsertionPoint); GenTree* fgMorphCall(GenTreeCall* call); GenTree* fgExpandVirtualVtableCallTarget(GenTreeCall* call); void fgMorphCallInline(GenTreeCall* call, InlineResult* result); void fgMorphCallInlineHelper(GenTreeCall* call, InlineResult* result, InlineContext** createdContext); #if DEBUG void fgNoteNonInlineCandidate(Statement* stmt, GenTreeCall* call); static fgWalkPreFn fgFindNonInlineCandidate; #endif GenTree* fgOptimizeDelegateConstructor(GenTreeCall* call, CORINFO_CONTEXT_HANDLE* ExactContextHnd, CORINFO_RESOLVED_TOKEN* ldftnToken); GenTree* fgMorphLeaf(GenTree* tree); void fgAssignSetVarDef(GenTree* tree); GenTree* fgMorphOneAsgBlockOp(GenTree* tree); GenTree* fgMorphInitBlock(GenTree* tree); GenTree* fgMorphPromoteLocalInitBlock(GenTreeLclVar* destLclNode, GenTree* initVal, unsigned blockSize); GenTree* fgMorphGetStructAddr(GenTree** pTree, CORINFO_CLASS_HANDLE clsHnd, bool isRValue = false); GenTree* fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigned blockWidth, bool isBlkReqd); GenTree* fgMorphCopyBlock(GenTree* tree); GenTree* fgMorphStoreDynBlock(GenTreeStoreDynBlk* tree); GenTree* fgMorphForRegisterFP(GenTree* tree); GenTree* fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac = nullptr); GenTree* fgOptimizeCast(GenTreeCast* cast); GenTree* fgOptimizeEqualityComparisonWithConst(GenTreeOp* cmp); GenTree* fgOptimizeRelationalComparisonWithConst(GenTreeOp* cmp); #ifdef FEATURE_HW_INTRINSICS GenTree* 
fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node); #endif GenTree* fgOptimizeCommutativeArithmetic(GenTreeOp* tree); GenTree* fgOptimizeRelationalComparisonWithCasts(GenTreeOp* cmp); GenTree* fgOptimizeAddition(GenTreeOp* add); GenTree* fgOptimizeMultiply(GenTreeOp* mul); GenTree* fgOptimizeBitwiseAnd(GenTreeOp* andOp); GenTree* fgPropagateCommaThrow(GenTree* parent, GenTreeOp* commaThrow, GenTreeFlags precedingSideEffects); GenTree* fgMorphRetInd(GenTreeUnOp* tree); GenTree* fgMorphModToSubMulDiv(GenTreeOp* tree); GenTree* fgMorphUModToAndSub(GenTreeOp* tree); GenTree* fgMorphSmpOpOptional(GenTreeOp* tree); GenTree* fgMorphMultiOp(GenTreeMultiOp* multiOp); GenTree* fgMorphConst(GenTree* tree); bool fgMorphCanUseLclFldForCopy(unsigned lclNum1, unsigned lclNum2); GenTreeLclVar* fgMorphTryFoldObjAsLclVar(GenTreeObj* obj, bool destroyNodes = true); GenTreeOp* fgMorphCommutative(GenTreeOp* tree); GenTree* fgMorphCastedBitwiseOp(GenTreeOp* tree); GenTree* fgMorphReduceAddOps(GenTree* tree); public: GenTree* fgMorphTree(GenTree* tree, MorphAddrContext* mac = nullptr); private: void fgKillDependentAssertionsSingle(unsigned lclNum DEBUGARG(GenTree* tree)); void fgKillDependentAssertions(unsigned lclNum DEBUGARG(GenTree* tree)); void fgMorphTreeDone(GenTree* tree, GenTree* oldTree = nullptr DEBUGARG(int morphNum = 0)); Statement* fgMorphStmt; unsigned fgGetBigOffsetMorphingTemp(var_types type); // We cache one temp per type to be // used when morphing big offset. //----------------------- Liveness analysis ------------------------------- VARSET_TP fgCurUseSet; // vars used by block (before an assignment) VARSET_TP fgCurDefSet; // vars assigned by block (before a use) MemoryKindSet fgCurMemoryUse; // True iff the current basic block uses memory. MemoryKindSet fgCurMemoryDef; // True iff the current basic block modifies memory. MemoryKindSet fgCurMemoryHavoc; // True if the current basic block is known to set memory to a "havoc" value. bool byrefStatesMatchGcHeapStates; // True iff GcHeap and ByrefExposed memory have all the same def points. void fgMarkUseDef(GenTreeLclVarCommon* tree); void fgBeginScopeLife(VARSET_TP* inScope, VarScopeDsc* var); void fgEndScopeLife(VARSET_TP* inScope, VarScopeDsc* var); void fgMarkInScope(BasicBlock* block, VARSET_VALARG_TP inScope); void fgUnmarkInScope(BasicBlock* block, VARSET_VALARG_TP unmarkScope); void fgExtendDbgScopes(); void fgExtendDbgLifetimes(); #ifdef DEBUG void fgDispDebugScopes(); #endif // DEBUG //------------------------------------------------------------------------- // // The following keeps track of any code we've added for things like array // range checking or explicit calls to enable GC, and so on. // public: struct AddCodeDsc { AddCodeDsc* acdNext; BasicBlock* acdDstBlk; // block to which we jump unsigned acdData; SpecialCodeKind acdKind; // what kind of a special block is this? #if !FEATURE_FIXED_OUT_ARGS bool acdStkLvlInit; // has acdStkLvl value been already set? unsigned acdStkLvl; // stack level in stack slots. 
#endif // !FEATURE_FIXED_OUT_ARGS }; private: static unsigned acdHelper(SpecialCodeKind codeKind); AddCodeDsc* fgAddCodeList; bool fgAddCodeModf; bool fgRngChkThrowAdded; AddCodeDsc* fgExcptnTargetCache[SCK_COUNT]; BasicBlock* fgRngChkTarget(BasicBlock* block, SpecialCodeKind kind); BasicBlock* fgAddCodeRef(BasicBlock* srcBlk, unsigned refData, SpecialCodeKind kind); public: AddCodeDsc* fgFindExcptnTarget(SpecialCodeKind kind, unsigned refData); bool fgUseThrowHelperBlocks(); AddCodeDsc* fgGetAdditionalCodeDescriptors() { return fgAddCodeList; } private: bool fgIsCodeAdded(); bool fgIsThrowHlpBlk(BasicBlock* block); #if !FEATURE_FIXED_OUT_ARGS unsigned fgThrowHlpBlkStkLevel(BasicBlock* block); #endif // !FEATURE_FIXED_OUT_ARGS unsigned fgBigOffsetMorphingTemps[TYP_COUNT]; unsigned fgCheckInlineDepthAndRecursion(InlineInfo* inlineInfo); void fgInvokeInlineeCompiler(GenTreeCall* call, InlineResult* result, InlineContext** createdContext); void fgInsertInlineeBlocks(InlineInfo* pInlineInfo); Statement* fgInlinePrependStatements(InlineInfo* inlineInfo); void fgInlineAppendStatements(InlineInfo* inlineInfo, BasicBlock* block, Statement* stmt); #if FEATURE_MULTIREG_RET GenTree* fgGetStructAsStructPtr(GenTree* tree); GenTree* fgAssignStructInlineeToVar(GenTree* child, CORINFO_CLASS_HANDLE retClsHnd); void fgAttachStructInlineeToAsg(GenTree* tree, GenTree* child, CORINFO_CLASS_HANDLE retClsHnd); #endif // FEATURE_MULTIREG_RET static fgWalkPreFn fgUpdateInlineReturnExpressionPlaceHolder; static fgWalkPostFn fgLateDevirtualization; #ifdef DEBUG static fgWalkPreFn fgDebugCheckInlineCandidates; void CheckNoTransformableIndirectCallsRemain(); static fgWalkPreFn fgDebugCheckForTransformableIndirectCalls; #endif void fgPromoteStructs(); void fgMorphStructField(GenTree* tree, GenTree* parent); void fgMorphLocalField(GenTree* tree, GenTree* parent); // Reset the refCount for implicit byrefs. void fgResetImplicitByRefRefCount(); // Change implicit byrefs' types from struct to pointer, and for any that were // promoted, create new promoted struct temps. void fgRetypeImplicitByRefArgs(); // Rewrite appearances of implicit byrefs (manifest the implied additional level of indirection). bool fgMorphImplicitByRefArgs(GenTree* tree); GenTree* fgMorphImplicitByRefArgs(GenTree* tree, bool isAddr); // Clear up annotations for any struct promotion temps created for implicit byrefs. void fgMarkDemotedImplicitByRefArgs(); void fgMarkAddressExposedLocals(); void fgMarkAddressExposedLocals(Statement* stmt); PhaseStatus fgForwardSub(); bool fgForwardSubBlock(BasicBlock* block); bool fgForwardSubStatement(Statement* statement); static fgWalkPreFn fgUpdateSideEffectsPre; static fgWalkPostFn fgUpdateSideEffectsPost; // The given local variable, required to be a struct variable, is being assigned via // a "lclField", to make it masquerade as an integral type in the ABI. Make sure that // the variable is not enregistered, and is therefore not promoted independently. 
void fgLclFldAssign(unsigned lclNum); static fgWalkPreFn gtHasLocalsWithAddrOpCB; enum TypeProducerKind { TPK_Unknown = 0, // May not be a RuntimeType TPK_Handle = 1, // RuntimeType via handle TPK_GetType = 2, // RuntimeType via Object.get_Type() TPK_Null = 3, // Tree value is null TPK_Other = 4 // RuntimeType via other means }; TypeProducerKind gtGetTypeProducerKind(GenTree* tree); bool gtIsTypeHandleToRuntimeTypeHelper(GenTreeCall* call); bool gtIsTypeHandleToRuntimeTypeHandleHelper(GenTreeCall* call, CorInfoHelpFunc* pHelper = nullptr); bool gtIsActiveCSE_Candidate(GenTree* tree); bool fgIsBigOffset(size_t offset); bool fgNeedReturnSpillTemp(); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Optimizer XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: void optInit(); GenTree* optRemoveRangeCheck(GenTreeBoundsChk* check, GenTree* comma, Statement* stmt); GenTree* optRemoveStandaloneRangeCheck(GenTreeBoundsChk* check, Statement* stmt); void optRemoveCommaBasedRangeCheck(GenTree* comma, Statement* stmt); protected: // Do hoisting for all loops. void optHoistLoopCode(); // To represent sets of VN's that have already been hoisted in outer loops. typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, bool> VNSet; struct LoopHoistContext { private: // The set of variables hoisted in the current loop (or nullptr if there are none). VNSet* m_pHoistedInCurLoop; public: // Value numbers of expressions that have been hoisted in parent loops in the loop nest. VNSet m_hoistedInParentLoops; // Value numbers of expressions that have been hoisted in the current (or most recent) loop in the nest. // Previous decisions on loop-invariance of value numbers in the current loop. VNSet m_curLoopVnInvariantCache; VNSet* GetHoistedInCurLoop(Compiler* comp) { if (m_pHoistedInCurLoop == nullptr) { m_pHoistedInCurLoop = new (comp->getAllocatorLoopHoist()) VNSet(comp->getAllocatorLoopHoist()); } return m_pHoistedInCurLoop; } VNSet* ExtractHoistedInCurLoop() { VNSet* res = m_pHoistedInCurLoop; m_pHoistedInCurLoop = nullptr; return res; } LoopHoistContext(Compiler* comp) : m_pHoistedInCurLoop(nullptr) , m_hoistedInParentLoops(comp->getAllocatorLoopHoist()) , m_curLoopVnInvariantCache(comp->getAllocatorLoopHoist()) { } }; // Do hoisting for loop "lnum" (an index into the optLoopTable), and all loops nested within it. // Tracks the expressions that have been hoisted by containing loops by temporarily recording their // value numbers in "m_hoistedInParentLoops". This set is not modified by the call. void optHoistLoopNest(unsigned lnum, LoopHoistContext* hoistCtxt); // Do hoisting for a particular loop ("lnum" is an index into the optLoopTable.) // Assumes that expressions have been hoisted in containing loops if their value numbers are in // "m_hoistedInParentLoops". // void optHoistThisLoop(unsigned lnum, LoopHoistContext* hoistCtxt); // Hoist all expressions in "blocks" that are invariant in loop "loopNum" (an index into the optLoopTable) // outside of that loop. Exempt expressions whose value number is in "m_hoistedInParentLoops"; add VN's of hoisted // expressions to "hoistInLoop". 
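    // (A rough sketch of the overall hoisting flow, for orientation: optHoistLoopCode() visits the loops and
    // calls optHoistLoopNest() for each outermost loop; optHoistLoopNest() recurses into the nested loops and
    // calls optHoistThisLoop(); optHoistThisLoop() gathers the loop's blocks and calls optHoistLoopBlocks()
    // below; candidates found there are screened by optIsProfitableToHoistTree() and optHoistCandidate(), and
    // optPerformHoistExpr() finally places the hoisted expression ahead of the loop, in its pre-header.)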
void optHoistLoopBlocks(unsigned loopNum, ArrayStack<BasicBlock*>* blocks, LoopHoistContext* hoistContext); // Return true if the tree looks profitable to hoist out of loop 'lnum'. bool optIsProfitableToHoistTree(GenTree* tree, unsigned lnum); // Performs the hoisting 'tree' into the PreHeader for loop 'lnum' void optHoistCandidate(GenTree* tree, BasicBlock* treeBb, unsigned lnum, LoopHoistContext* hoistCtxt); // Returns true iff the ValueNum "vn" represents a value that is loop-invariant in "lnum". // Constants and init values are always loop invariant. // VNPhi's connect VN's to the SSA definition, so we can know if the SSA def occurs in the loop. bool optVNIsLoopInvariant(ValueNum vn, unsigned lnum, VNSet* recordedVNs); // If "blk" is the entry block of a natural loop, returns true and sets "*pLnum" to the index of the loop // in the loop table. bool optBlockIsLoopEntry(BasicBlock* blk, unsigned* pLnum); // Records the set of "side effects" of all loops: fields (object instance and static) // written to, and SZ-array element type equivalence classes updated. void optComputeLoopSideEffects(); #ifdef DEBUG bool optAnyChildNotRemoved(unsigned loopNum); #endif // DEBUG // Mark a loop as removed. void optMarkLoopRemoved(unsigned loopNum); private: // Requires "lnum" to be the index of an outermost loop in the loop table. Traverses the body of that loop, // including all nested loops, and records the set of "side effects" of the loop: fields (object instance and // static) written to, and SZ-array element type equivalence classes updated. void optComputeLoopNestSideEffects(unsigned lnum); // Given a loop number 'lnum' mark it and any nested loops as having 'memoryHavoc' void optRecordLoopNestsMemoryHavoc(unsigned lnum, MemoryKindSet memoryHavoc); // Add the side effects of "blk" (which is required to be within a loop) to all loops of which it is a part. // Returns false if we encounter a block that is not marked as being inside a loop. // bool optComputeLoopSideEffectsOfBlock(BasicBlock* blk); // Hoist the expression "expr" out of loop "lnum". void optPerformHoistExpr(GenTree* expr, BasicBlock* exprBb, unsigned lnum); public: void optOptimizeBools(); public: PhaseStatus optInvertLoops(); // Invert loops so they're entered at top and tested at bottom. PhaseStatus optOptimizeLayout(); // Optimize the BasicBlock layout of the method PhaseStatus optSetBlockWeights(); PhaseStatus optFindLoopsPhase(); // Finds loops and records them in the loop table void optFindLoops(); PhaseStatus optCloneLoops(); void optCloneLoop(unsigned loopInd, LoopCloneContext* context); void optEnsureUniqueHead(unsigned loopInd, weight_t ambientWeight); PhaseStatus optUnrollLoops(); // Unrolls loops (needs to have cost info) void optRemoveRedundantZeroInits(); protected: // This enumeration describes what is killed by a call. enum callInterf { CALLINT_NONE, // no interference (most helpers) CALLINT_REF_INDIRS, // kills GC ref indirections (SETFIELD OBJ) CALLINT_SCL_INDIRS, // kills non GC ref indirections (SETFIELD non-OBJ) CALLINT_ALL_INDIRS, // kills both GC ref and non GC ref indirections (SETFIELD STRUCT) CALLINT_ALL, // kills everything (normal method call) }; enum class FieldKindForVN { SimpleStatic, WithBaseAddr }; public: // A "LoopDsc" describes a ("natural") loop. We (currently) require the body of a loop to be a contiguous (in // bbNext order) sequence of basic blocks. (At times, we may require the blocks in a loop to be "properly numbered" // in bbNext order; we use comparisons on the bbNum to decide order.) 
// The blocks that define the body are // top <= entry <= bottom // The "head" of the loop is a block outside the loop that has "entry" as a successor. We only support loops with a // single 'head' block. The meanings of these blocks are given in the definitions below. Also see the picture at // Compiler::optFindNaturalLoops(). struct LoopDsc { BasicBlock* lpHead; // HEAD of the loop (not part of the looping of the loop) -- has ENTRY as a successor. BasicBlock* lpTop; // loop TOP (the back edge from lpBottom reaches here). Lexically first block (in bbNext // order) reachable in this loop. BasicBlock* lpEntry; // the ENTRY in the loop (in most cases TOP or BOTTOM) BasicBlock* lpBottom; // loop BOTTOM (from here we have a back edge to the TOP) BasicBlock* lpExit; // if a single exit loop this is the EXIT (in most cases BOTTOM) callInterf lpAsgCall; // "callInterf" for calls in the loop ALLVARSET_TP lpAsgVars; // set of vars assigned within the loop (all vars, not just tracked) varRefKinds lpAsgInds : 8; // set of inds modified within the loop LoopFlags lpFlags; unsigned char lpExitCnt; // number of exits from the loop unsigned char lpParent; // The index of the most-nested loop that completely contains this one, // or else BasicBlock::NOT_IN_LOOP if no such loop exists. unsigned char lpChild; // The index of a nested loop, or else BasicBlock::NOT_IN_LOOP if no child exists. // (Actually, an "immediately" nested loop -- // no other child of this loop is a parent of lpChild.) unsigned char lpSibling; // The index of another loop that is an immediate child of lpParent, // or else BasicBlock::NOT_IN_LOOP. One can enumerate all the children of a loop // by following "lpChild" then "lpSibling" links. bool lpLoopHasMemoryHavoc[MemoryKindCount]; // The loop contains an operation that we assume has arbitrary // memory side effects. If this is set, the fields below // may not be accurate (since they become irrelevant.) VARSET_TP lpVarInOut; // The set of variables that are IN or OUT during the execution of this loop VARSET_TP lpVarUseDef; // The set of variables that are USE or DEF during the execution of this loop // The following counts are used for hoisting profitability checks. int lpHoistedExprCount; // The register count for the non-FP expressions from inside this loop that have been // hoisted int lpLoopVarCount; // The register count for the non-FP LclVars that are read/written inside this loop int lpVarInOutCount; // The register count for the non-FP LclVars that are alive inside or across this loop int lpHoistedFPExprCount; // The register count for the FP expressions from inside this loop that have been // hoisted int lpLoopVarFPCount; // The register count for the FP LclVars that are read/written inside this loop int lpVarInOutFPCount; // The register count for the FP LclVars that are alive inside or across this loop typedef JitHashTable<CORINFO_FIELD_HANDLE, JitPtrKeyFuncs<struct CORINFO_FIELD_STRUCT_>, FieldKindForVN> FieldHandleSet; FieldHandleSet* lpFieldsModified; // This has entries for all static field and object instance fields modified // in the loop. typedef JitHashTable<CORINFO_CLASS_HANDLE, JitPtrKeyFuncs<struct CORINFO_CLASS_STRUCT_>, bool> ClassHandleSet; ClassHandleSet* lpArrayElemTypesModified; // Bits set indicate the set of sz array element types such that // arrays of that type are modified // in the loop. 
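        // (For illustration: the keys of lpArrayElemTypesModified are the "pseudo"-class handles produced by
        // EncodeElemType() above, so a store to an int[] would presumably be recorded under
        // EncodeElemType(TYP_INT, nullptr), which DecodeElemType() maps back to TYP_INT.)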
// Adds the variable liveness information for 'blk' to 'this' LoopDsc void AddVariableLiveness(Compiler* comp, BasicBlock* blk); inline void AddModifiedField(Compiler* comp, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind); // This doesn't *always* take a class handle -- it can also take primitive types, encoded as class handles // (shifted left, with a low-order bit set to distinguish.) // Use the {Encode/Decode}ElemType methods to construct/destruct these. inline void AddModifiedElemType(Compiler* comp, CORINFO_CLASS_HANDLE structHnd); /* The following values are set only for iterator loops, i.e. has the flag LPFLG_ITER set */ GenTree* lpIterTree; // The "i = i <op> const" tree unsigned lpIterVar() const; // iterator variable # int lpIterConst() const; // the constant with which the iterator is incremented genTreeOps lpIterOper() const; // the type of the operation on the iterator (ASG_ADD, ASG_SUB, etc.) void VERIFY_lpIterTree() const; var_types lpIterOperType() const; // For overflow instructions // Set to the block where we found the initialization for LPFLG_CONST_INIT or LPFLG_VAR_INIT loops. // Initially, this will be 'head', but 'head' might change if we insert a loop pre-header block. BasicBlock* lpInitBlock; union { int lpConstInit; // initial constant value of iterator // : Valid if LPFLG_CONST_INIT unsigned lpVarInit; // initial local var number to which we initialize the iterator // : Valid if LPFLG_VAR_INIT }; // The following is for LPFLG_ITER loops only (i.e. the loop condition is "i RELOP const or var") GenTree* lpTestTree; // pointer to the node containing the loop test genTreeOps lpTestOper() const; // the type of the comparison between the iterator and the limit (GT_LE, GT_GE, // etc.) void VERIFY_lpTestTree() const; bool lpIsReversed() const; // true if the iterator node is the second operand in the loop condition GenTree* lpIterator() const; // the iterator node in the loop test GenTree* lpLimit() const; // the limit node in the loop test // Limit constant value of iterator - loop condition is "i RELOP const" // : Valid if LPFLG_CONST_LIMIT int lpConstLimit() const; // The lclVar # in the loop condition ( "i RELOP lclVar" ) // : Valid if LPFLG_VAR_LIMIT unsigned lpVarLimit() const; // The array length in the loop condition ( "i RELOP arr.len" or "i RELOP arr[i][j].len" ) // : Valid if LPFLG_ARRLEN_LIMIT bool lpArrLenLimit(Compiler* comp, ArrIndex* index) const; // Returns "true" iff this is a "top entry" loop. bool lpIsTopEntry() const { if (lpHead->bbNext == lpEntry) { assert(lpHead->bbFallsThrough()); assert(lpTop == lpEntry); return true; } else { return false; } } // Returns "true" iff "*this" contains the blk. bool lpContains(BasicBlock* blk) const { return lpTop->bbNum <= blk->bbNum && blk->bbNum <= lpBottom->bbNum; } // Returns "true" iff "*this" (properly) contains the range [top, bottom] (allowing tops // to be equal, but requiring bottoms to be different.) bool lpContains(BasicBlock* top, BasicBlock* bottom) const { return lpTop->bbNum <= top->bbNum && bottom->bbNum < lpBottom->bbNum; } // Returns "true" iff "*this" (properly) contains "lp2" (allowing tops to be equal, but requiring // bottoms to be different.) bool lpContains(const LoopDsc& lp2) const { return lpContains(lp2.lpTop, lp2.lpBottom); } // Returns "true" iff "*this" is (properly) contained by the range [top, bottom] // (allowing tops to be equal, but requiring bottoms to be different.) 
bool lpContainedBy(BasicBlock* top, BasicBlock* bottom) const { return top->bbNum <= lpTop->bbNum && lpBottom->bbNum < bottom->bbNum; } // Returns "true" iff "*this" is (properly) contained by "lp2" // (allowing tops to be equal, but requiring bottoms to be different.) bool lpContainedBy(const LoopDsc& lp2) const { return lpContainedBy(lp2.lpTop, lp2.lpBottom); } // Returns "true" iff "*this" is disjoint from the range [top, bottom]. bool lpDisjoint(BasicBlock* top, BasicBlock* bottom) const { return bottom->bbNum < lpTop->bbNum || lpBottom->bbNum < top->bbNum; } // Returns "true" iff "*this" is disjoint from "lp2". bool lpDisjoint(const LoopDsc& lp2) const { return lpDisjoint(lp2.lpTop, lp2.lpBottom); } // Returns "true" iff the loop is well-formed (see code for defn). bool lpWellFormed() const { return lpTop->bbNum <= lpEntry->bbNum && lpEntry->bbNum <= lpBottom->bbNum && (lpHead->bbNum < lpTop->bbNum || lpHead->bbNum > lpBottom->bbNum); } #ifdef DEBUG void lpValidatePreHeader() const { // If this is called, we expect there to be a pre-header. assert(lpFlags & LPFLG_HAS_PREHEAD); // The pre-header must unconditionally enter the loop. assert(lpHead->GetUniqueSucc() == lpEntry); // The loop block must be marked as a pre-header. assert(lpHead->bbFlags & BBF_LOOP_PREHEADER); // The loop entry must have a single non-loop predecessor, which is the pre-header. // We can't assume here that the bbNum are properly ordered, so we can't do a simple lpContained() // check. So, we defer this check, which will be done by `fgDebugCheckLoopTable()`. } #endif // DEBUG // LoopBlocks: convenience method for enabling range-based `for` iteration over all the // blocks in a loop, e.g.: // for (BasicBlock* const block : loop->LoopBlocks()) ... // Currently, the loop blocks are expected to be in linear, lexical, `bbNext` order // from `lpTop` through `lpBottom`, inclusive. All blocks in this range are considered // to be part of the loop. // BasicBlockRangeList LoopBlocks() const { return BasicBlockRangeList(lpTop, lpBottom); } }; protected: bool fgMightHaveLoop(); // returns true if there are any back edges bool fgHasLoops; // True if this method has any loops, set in fgComputeReachability public: LoopDsc* optLoopTable; // loop descriptor table unsigned char optLoopCount; // number of tracked loops unsigned char loopAlignCandidates; // number of loops identified for alignment // Every time we rebuild the loop table, we increase the global "loop epoch". Any loop indices or // loop table pointers from the previous epoch are invalid. // TODO: validate this in some way? unsigned optCurLoopEpoch; void NewLoopEpoch() { ++optCurLoopEpoch; JITDUMP("New loop epoch %d\n", optCurLoopEpoch); } #ifdef DEBUG unsigned char loopsAligned; // number of loops actually aligned #endif // DEBUG bool optRecordLoop(BasicBlock* head, BasicBlock* top, BasicBlock* entry, BasicBlock* bottom, BasicBlock* exit, unsigned char exitCnt); void optClearLoopIterInfo(); #ifdef DEBUG void optPrintLoopInfo(unsigned lnum, bool printVerbose = false); void optPrintLoopInfo(const LoopDsc* loop, bool printVerbose = false); void optPrintLoopTable(); #endif protected: unsigned optCallCount; // number of calls made in the method unsigned optIndirectCallCount; // number of virtual, interface and indirect calls made in the method unsigned optNativeCallCount; // number of Pinvoke/Native calls made in the method unsigned optLoopsCloned; // number of loops cloned in the current method. 
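    // (For reference, the loop nest recorded in optLoopTable can be walked via the lpChild/lpSibling links
    // described in LoopDsc above; an illustrative sketch:
    //     for (unsigned child = optLoopTable[lnum].lpChild; child != BasicBlock::NOT_IN_LOOP;
    //          child = optLoopTable[child].lpSibling)
    //     {
    //         // ... process child loop ...
    //     }
    // )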
#ifdef DEBUG void optCheckPreds(); #endif void optResetLoopInfo(); void optFindAndScaleGeneralLoopBlocks(); // Determine if there are any potential loops, and set BBF_LOOP_HEAD on potential loop heads. void optMarkLoopHeads(); void optScaleLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk); void optUnmarkLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk); void optUpdateLoopsBeforeRemoveBlock(BasicBlock* block, bool skipUnmarkLoop = false); bool optIsLoopTestEvalIntoTemp(Statement* testStmt, Statement** newTestStmt); unsigned optIsLoopIncrTree(GenTree* incr); bool optCheckIterInLoopTest(unsigned loopInd, GenTree* test, BasicBlock* from, BasicBlock* to, unsigned iterVar); bool optComputeIterInfo(GenTree* incr, BasicBlock* from, BasicBlock* to, unsigned* pIterVar); bool optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenTree* init, unsigned iterVar); bool optExtractInitTestIncr( BasicBlock* head, BasicBlock* bottom, BasicBlock* exit, GenTree** ppInit, GenTree** ppTest, GenTree** ppIncr); void optFindNaturalLoops(); void optIdentifyLoopsForAlignment(); // Ensures that all the loops in the loop nest rooted at "loopInd" (an index into the loop table) are 'canonical' -- // each loop has a unique "top." Returns "true" iff the flowgraph has been modified. bool optCanonicalizeLoopNest(unsigned char loopInd); // Ensures that the loop "loopInd" (an index into the loop table) is 'canonical' -- it has a unique "top," // unshared with any other loop. Returns "true" iff the flowgraph has been modified bool optCanonicalizeLoop(unsigned char loopInd); // Requires "l1" to be a valid loop table index, and not "BasicBlock::NOT_IN_LOOP". // Requires "l2" to be a valid loop table index, or else "BasicBlock::NOT_IN_LOOP". // Returns true iff "l2" is not NOT_IN_LOOP, and "l1" contains "l2". // A loop contains itself. bool optLoopContains(unsigned l1, unsigned l2) const; // Updates the loop table by changing loop "loopInd", whose head is required // to be "from", to be "to". Also performs this transformation for any // loop nested in "loopInd" that shares the same head as "loopInd". void optUpdateLoopHead(unsigned loopInd, BasicBlock* from, BasicBlock* to); void optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, const bool updatePreds = false); // Marks the containsCall information to "lnum" and any parent loops. void AddContainsCallAllContainingLoops(unsigned lnum); // Adds the variable liveness information from 'blk' to "lnum" and any parent loops. void AddVariableLivenessAllContainingLoops(unsigned lnum, BasicBlock* blk); // Adds "fldHnd" to the set of modified fields of "lnum" and any parent loops. void AddModifiedFieldAllContainingLoops(unsigned lnum, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind); // Adds "elemType" to the set of modified array element types of "lnum" and any parent loops. void AddModifiedElemTypeAllContainingLoops(unsigned lnum, CORINFO_CLASS_HANDLE elemType); // Requires that "from" and "to" have the same "bbJumpKind" (perhaps because "to" is a clone // of "from".) Copies the jump destination from "from" to "to". void optCopyBlkDest(BasicBlock* from, BasicBlock* to); // Returns true if 'block' is an entry block for any loop in 'optLoopTable' bool optIsLoopEntry(BasicBlock* block) const; // The depth of the loop described by "lnum" (an index into the loop table.) 
// (0 == top level)
unsigned optLoopDepth(unsigned lnum)
{
    assert(lnum < optLoopCount);

    unsigned depth = 0;
    while ((lnum = optLoopTable[lnum].lpParent) != BasicBlock::NOT_IN_LOOP)
    {
        ++depth;
    }
    return depth;
}

// Struct used in optInvertWhileLoop to count interesting constructs to boost the profitability score.
struct OptInvertCountTreeInfoType
{
    int sharedStaticHelperCount;
    int arrayLengthCount;
};

static fgWalkResult optInvertCountTreeInfo(GenTree** pTree, fgWalkData* data);

bool optInvertWhileLoop(BasicBlock* block);

private:
static bool optIterSmallOverflow(int iterAtExit, var_types incrType);
static bool optIterSmallUnderflow(int iterAtExit, var_types decrType);

bool optComputeLoopRep(int        constInit,
                       int        constLimit,
                       int        iterInc,
                       genTreeOps iterOper,
                       var_types  iterType,
                       genTreeOps testOper,
                       bool       unsignedTest,
                       bool       dupCond,
                       unsigned*  iterCount);

static fgWalkPreFn optIsVarAssgCB;

protected:
bool optIsVarAssigned(BasicBlock* beg, BasicBlock* end, GenTree* skip, unsigned var);

bool optIsVarAssgLoop(unsigned lnum, unsigned var);

int optIsSetAssgLoop(unsigned lnum, ALLVARSET_VALARG_TP vars, varRefKinds inds = VR_NONE);

bool optNarrowTree(GenTree* tree, var_types srct, var_types dstt, ValueNumPair vnpNarrow, bool doit);

protected:
// The following is the upper limit on how many expressions we'll keep track
// of for the CSE analysis.
//
static const unsigned MAX_CSE_CNT = EXPSET_SZ;

static const int MIN_CSE_COST = 2;

// BitVec trait information only used by the optCSE_canSwap() method, for the CSE_defMask and CSE_useMask.
// This BitVec uses one bit per CSE candidate
BitVecTraits* cseMaskTraits; // one bit per CSE candidate

// BitVec trait information for computing CSE availability using the CSE_DataFlow algorithm.
// Two bits are allocated per CSE candidate to compute CSE availability
// plus an extra bit to handle the initial unvisited case.
// (See CSE_DataFlow::EndMerge for an explanation of why this is necessary.)
//
// The two bits per CSE candidate have the following meanings:
//     11 - The CSE is available, and is also available when considering calls as killing availability.
//     10 - The CSE is available, but is not available when considering calls as killing availability.
//     00 - The CSE is not available
//     01 - An illegal combination
//
BitVecTraits* cseLivenessTraits;

//-----------------------------------------------------------------------------------------------------------------
// genCSEnum2bit: Return the normalized index to use in the EXPSET_TP for the CSE with the given CSE index.
// Each GenTree has a `gtCSEnum` field. Zero is reserved to mean this node is not a CSE, positive values indicate
// CSE uses, and negative values indicate CSE defs. The caller must pass a non-zero positive value, as from
// GET_CSE_INDEX().
//
static unsigned genCSEnum2bit(unsigned CSEnum)
{
    assert((CSEnum > 0) && (CSEnum <= MAX_CSE_CNT));
    return CSEnum - 1;
}

//-----------------------------------------------------------------------------------------------------------------
// getCSEAvailBit: Return the bit used by CSE dataflow sets (bbCseGen, etc.) for the availability bit for a CSE.
//
static unsigned getCSEAvailBit(unsigned CSEnum)
{
    return genCSEnum2bit(CSEnum) * 2;
}

//-----------------------------------------------------------------------------------------------------------------
// getCSEAvailCrossCallBit: Return the bit used by CSE dataflow sets (bbCseGen, etc.) for the availability bit
// for a CSE considering calls as killing availability bit (see description above).
// static unsigned getCSEAvailCrossCallBit(unsigned CSEnum) { return getCSEAvailBit(CSEnum) + 1; } void optPrintCSEDataFlowSet(EXPSET_VALARG_TP cseDataFlowSet, bool includeBits = true); EXPSET_TP cseCallKillsMask; // Computed once - A mask that is used to kill available CSEs at callsites /* Generic list of nodes - used by the CSE logic */ struct treeLst { treeLst* tlNext; GenTree* tlTree; }; struct treeStmtLst { treeStmtLst* tslNext; GenTree* tslTree; // tree node Statement* tslStmt; // statement containing the tree BasicBlock* tslBlock; // block containing the statement }; // The following logic keeps track of expressions via a simple hash table. struct CSEdsc { CSEdsc* csdNextInBucket; // used by the hash table size_t csdHashKey; // the orginal hashkey ssize_t csdConstDefValue; // When we CSE similar constants, this is the value that we use as the def ValueNum csdConstDefVN; // When we CSE similar constants, this is the ValueNumber that we use for the LclVar // assignment unsigned csdIndex; // 1..optCSECandidateCount bool csdIsSharedConst; // true if this CSE is a shared const bool csdLiveAcrossCall; unsigned short csdDefCount; // definition count unsigned short csdUseCount; // use count (excluding the implicit uses at defs) weight_t csdDefWtCnt; // weighted def count weight_t csdUseWtCnt; // weighted use count (excluding the implicit uses at defs) GenTree* csdTree; // treenode containing the 1st occurrence Statement* csdStmt; // stmt containing the 1st occurrence BasicBlock* csdBlock; // block containing the 1st occurrence treeStmtLst* csdTreeList; // list of matching tree nodes: head treeStmtLst* csdTreeLast; // list of matching tree nodes: tail // ToDo: This can be removed when gtGetStructHandleIfPresent stops guessing // and GT_IND nodes always have valid struct handle. // CORINFO_CLASS_HANDLE csdStructHnd; // The class handle, currently needed to create a SIMD LclVar in PerformCSE bool csdStructHndMismatch; ValueNum defExcSetPromise; // The exception set that is now required for all defs of this CSE. // This will be set to NoVN if we decide to abandon this CSE ValueNum defExcSetCurrent; // The set of exceptions we currently can use for CSE uses. ValueNum defConservNormVN; // if all def occurrences share the same conservative normal value // number, this will reflect it; otherwise, NoVN. // not used for shared const CSE's }; static const size_t s_optCSEhashSizeInitial; static const size_t s_optCSEhashGrowthFactor; static const size_t s_optCSEhashBucketSize; size_t optCSEhashSize; // The current size of hashtable size_t optCSEhashCount; // Number of entries in hashtable size_t optCSEhashMaxCountBeforeResize; // Number of entries before resize CSEdsc** optCSEhash; CSEdsc** optCSEtab; typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, GenTree*> NodeToNodeMap; NodeToNodeMap* optCseCheckedBoundMap; // Maps bound nodes to ancestor compares that should be // re-numbered with the bound to improve range check elimination // Given a compare, look for a cse candidate checked bound feeding it and add a map entry if found. 
void optCseUpdateCheckedBoundMap(GenTree* compare); void optCSEstop(); CSEdsc* optCSEfindDsc(unsigned index); bool optUnmarkCSE(GenTree* tree); // user defined callback data for the tree walk function optCSE_MaskHelper() struct optCSE_MaskData { EXPSET_TP CSE_defMask; EXPSET_TP CSE_useMask; }; // Treewalk helper for optCSE_DefMask and optCSE_UseMask static fgWalkPreFn optCSE_MaskHelper; // This function walks all the node for an given tree // and return the mask of CSE definitions and uses for the tree // void optCSE_GetMaskData(GenTree* tree, optCSE_MaskData* pMaskData); // Given a binary tree node return true if it is safe to swap the order of evaluation for op1 and op2. bool optCSE_canSwap(GenTree* firstNode, GenTree* secondNode); struct optCSEcostCmpEx { bool operator()(const CSEdsc* op1, const CSEdsc* op2); }; struct optCSEcostCmpSz { bool operator()(const CSEdsc* op1, const CSEdsc* op2); }; void optCleanupCSEs(); #ifdef DEBUG void optEnsureClearCSEInfo(); #endif // DEBUG static bool Is_Shared_Const_CSE(size_t key) { return ((key & TARGET_SIGN_BIT) != 0); } // returns the encoded key static size_t Encode_Shared_Const_CSE_Value(size_t key) { return TARGET_SIGN_BIT | (key >> CSE_CONST_SHARED_LOW_BITS); } // returns the orginal key static size_t Decode_Shared_Const_CSE_Value(size_t enckey) { assert(Is_Shared_Const_CSE(enckey)); return (enckey & ~TARGET_SIGN_BIT) << CSE_CONST_SHARED_LOW_BITS; } /************************************************************************** * Value Number based CSEs *************************************************************************/ // String to use for formatting CSE numbers. Note that this is the positive number, e.g., from GET_CSE_INDEX(). #define FMT_CSE "CSE #%02u" public: void optOptimizeValnumCSEs(); protected: void optValnumCSE_Init(); unsigned optValnumCSE_Index(GenTree* tree, Statement* stmt); bool optValnumCSE_Locate(); void optValnumCSE_InitDataFlow(); void optValnumCSE_DataFlow(); void optValnumCSE_Availablity(); void optValnumCSE_Heuristic(); bool optDoCSE; // True when we have found a duplicate CSE tree bool optValnumCSE_phase; // True when we are executing the optOptimizeValnumCSEs() phase unsigned optCSECandidateCount; // Count of CSE's candidates unsigned optCSEstart; // The first local variable number that is a CSE unsigned optCSEcount; // The total count of CSE's introduced. weight_t optCSEweight; // The weight of the current block when we are doing PerformCSE bool optIsCSEcandidate(GenTree* tree); // lclNumIsTrueCSE returns true if the LclVar was introduced by the CSE phase of the compiler // bool lclNumIsTrueCSE(unsigned lclNum) const { return ((optCSEcount > 0) && (lclNum >= optCSEstart) && (lclNum < optCSEstart + optCSEcount)); } // lclNumIsCSE returns true if the LclVar should be treated like a CSE with regards to constant prop. // bool lclNumIsCSE(unsigned lclNum) const { return lvaGetDesc(lclNum)->lvIsCSE; } #ifdef DEBUG bool optConfigDisableCSE(); bool optConfigDisableCSE2(); #endif void optOptimizeCSEs(); struct isVarAssgDsc { GenTree* ivaSkip; ALLVARSET_TP ivaMaskVal; // Set of variables assigned to. This is a set of all vars, not tracked vars. #ifdef DEBUG void* ivaSelf; #endif unsigned ivaVar; // Variable we are interested in, or -1 varRefKinds ivaMaskInd; // What kind of indirect assignments are there? callInterf ivaMaskCall; // What kind of calls are there? bool ivaMaskIncomplete; // Variables not representable in ivaMaskVal were assigned to. 
}; static callInterf optCallInterf(GenTreeCall* call); public: // VN based copy propagation. // In DEBUG builds, we'd like to know the tree that the SSA definition was pushed for. // While for ordinary SSA defs it will be available (as an ASG) in the SSA descriptor, // for locals which will use "definitions from uses", it will not be, so we store it // in this class instead. class CopyPropSsaDef { LclSsaVarDsc* m_ssaDef; #ifdef DEBUG GenTree* m_defNode; #endif public: CopyPropSsaDef(LclSsaVarDsc* ssaDef, GenTree* defNode) : m_ssaDef(ssaDef) #ifdef DEBUG , m_defNode(defNode) #endif { } LclSsaVarDsc* GetSsaDef() const { return m_ssaDef; } #ifdef DEBUG GenTree* GetDefNode() const { return m_defNode; } #endif }; typedef ArrayStack<CopyPropSsaDef> CopyPropSsaDefStack; typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, CopyPropSsaDefStack*> LclNumToLiveDefsMap; // Copy propagation functions. void optCopyProp(Statement* stmt, GenTreeLclVarCommon* tree, unsigned lclNum, LclNumToLiveDefsMap* curSsaName); void optBlockCopyPropPopStacks(BasicBlock* block, LclNumToLiveDefsMap* curSsaName); void optBlockCopyProp(BasicBlock* block, LclNumToLiveDefsMap* curSsaName); void optCopyPropPushDef(GenTree* defNode, GenTreeLclVarCommon* lclNode, unsigned lclNum, LclNumToLiveDefsMap* curSsaName); unsigned optIsSsaLocal(GenTreeLclVarCommon* lclNode); int optCopyProp_LclVarScore(const LclVarDsc* lclVarDsc, const LclVarDsc* copyVarDsc, bool preferOp2); void optVnCopyProp(); INDEBUG(void optDumpCopyPropStack(LclNumToLiveDefsMap* curSsaName)); /************************************************************************** * Early value propagation *************************************************************************/ struct SSAName { unsigned m_lvNum; unsigned m_ssaNum; SSAName(unsigned lvNum, unsigned ssaNum) : m_lvNum(lvNum), m_ssaNum(ssaNum) { } static unsigned GetHashCode(SSAName ssaNm) { return (ssaNm.m_lvNum << 16) | (ssaNm.m_ssaNum); } static bool Equals(SSAName ssaNm1, SSAName ssaNm2) { return (ssaNm1.m_lvNum == ssaNm2.m_lvNum) && (ssaNm1.m_ssaNum == ssaNm2.m_ssaNum); } }; #define OMF_HAS_NEWARRAY 0x00000001 // Method contains 'new' of an array #define OMF_HAS_NEWOBJ 0x00000002 // Method contains 'new' of an object type. #define OMF_HAS_ARRAYREF 0x00000004 // Method contains array element loads or stores. #define OMF_HAS_NULLCHECK 0x00000008 // Method contains null check. #define OMF_HAS_FATPOINTER 0x00000010 // Method contains call, that needs fat pointer transformation. #define OMF_HAS_OBJSTACKALLOC 0x00000020 // Method contains an object allocated on the stack. #define OMF_HAS_GUARDEDDEVIRT 0x00000040 // Method contains guarded devirtualization candidate #define OMF_HAS_EXPRUNTIMELOOKUP 0x00000080 // Method contains a runtime lookup to an expandable dictionary. #define OMF_HAS_PATCHPOINT 0x00000100 // Method contains patchpoints #define OMF_NEEDS_GCPOLLS 0x00000200 // Method needs GC polls #define OMF_HAS_FROZEN_STRING 0x00000400 // Method has a frozen string (REF constant int), currently only on CoreRT. 
#define OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT 0x00000800 // Method contains partial compilation patchpoints #define OMF_HAS_TAILCALL_SUCCESSOR 0x00001000 // Method has potential tail call in a non BBJ_RETURN block bool doesMethodHaveFatPointer() { return (optMethodFlags & OMF_HAS_FATPOINTER) != 0; } void setMethodHasFatPointer() { optMethodFlags |= OMF_HAS_FATPOINTER; } void clearMethodHasFatPointer() { optMethodFlags &= ~OMF_HAS_FATPOINTER; } void addFatPointerCandidate(GenTreeCall* call); bool doesMethodHaveFrozenString() const { return (optMethodFlags & OMF_HAS_FROZEN_STRING) != 0; } void setMethodHasFrozenString() { optMethodFlags |= OMF_HAS_FROZEN_STRING; } bool doesMethodHaveGuardedDevirtualization() const { return (optMethodFlags & OMF_HAS_GUARDEDDEVIRT) != 0; } void setMethodHasGuardedDevirtualization() { optMethodFlags |= OMF_HAS_GUARDEDDEVIRT; } void clearMethodHasGuardedDevirtualization() { optMethodFlags &= ~OMF_HAS_GUARDEDDEVIRT; } void considerGuardedDevirtualization(GenTreeCall* call, IL_OFFSET ilOffset, bool isInterface, CORINFO_METHOD_HANDLE baseMethod, CORINFO_CLASS_HANDLE baseClass, CORINFO_CONTEXT_HANDLE* pContextHandle DEBUGARG(CORINFO_CLASS_HANDLE objClass) DEBUGARG(const char* objClassName)); void addGuardedDevirtualizationCandidate(GenTreeCall* call, CORINFO_METHOD_HANDLE methodHandle, CORINFO_CLASS_HANDLE classHandle, unsigned methodAttr, unsigned classAttr, unsigned likelihood); bool doesMethodHaveExpRuntimeLookup() { return (optMethodFlags & OMF_HAS_EXPRUNTIMELOOKUP) != 0; } void setMethodHasExpRuntimeLookup() { optMethodFlags |= OMF_HAS_EXPRUNTIMELOOKUP; } void clearMethodHasExpRuntimeLookup() { optMethodFlags &= ~OMF_HAS_EXPRUNTIMELOOKUP; } void addExpRuntimeLookupCandidate(GenTreeCall* call); bool doesMethodHavePatchpoints() { return (optMethodFlags & OMF_HAS_PATCHPOINT) != 0; } void setMethodHasPatchpoint() { optMethodFlags |= OMF_HAS_PATCHPOINT; } bool doesMethodHavePartialCompilationPatchpoints() { return (optMethodFlags & OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT) != 0; } void setMethodHasPartialCompilationPatchpoint() { optMethodFlags |= OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT; } unsigned optMethodFlags; bool doesMethodHaveNoReturnCalls() { return optNoReturnCallCount > 0; } void setMethodHasNoReturnCalls() { optNoReturnCallCount++; } unsigned optNoReturnCallCount; // Recursion bound controls how far we can go backwards tracking for a SSA value. // No throughput diff was found with backward walk bound between 3-8. 
static const int optEarlyPropRecurBound = 5; enum class optPropKind { OPK_INVALID, OPK_ARRAYLEN, OPK_NULLCHECK }; typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, GenTree*> LocalNumberToNullCheckTreeMap; GenTree* getArrayLengthFromAllocation(GenTree* tree DEBUGARG(BasicBlock* block)); GenTree* optPropGetValueRec(unsigned lclNum, unsigned ssaNum, optPropKind valueKind, int walkDepth); GenTree* optPropGetValue(unsigned lclNum, unsigned ssaNum, optPropKind valueKind); GenTree* optEarlyPropRewriteTree(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); bool optDoEarlyPropForBlock(BasicBlock* block); bool optDoEarlyPropForFunc(); void optEarlyProp(); void optFoldNullCheck(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); GenTree* optFindNullCheckToFold(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); bool optIsNullCheckFoldingLegal(GenTree* tree, GenTree* nullCheckTree, GenTree** nullCheckParent, Statement** nullCheckStmt); bool optCanMoveNullCheckPastTree(GenTree* tree, unsigned nullCheckLclNum, bool isInsideTry, bool checkSideEffectSummary); #if DEBUG void optCheckFlagsAreSet(unsigned methodFlag, const char* methodFlagStr, unsigned bbFlag, const char* bbFlagStr, GenTree* tree, BasicBlock* basicBlock); #endif // Redundant branch opts // PhaseStatus optRedundantBranches(); bool optRedundantRelop(BasicBlock* const block); bool optRedundantBranch(BasicBlock* const block); bool optJumpThread(BasicBlock* const block, BasicBlock* const domBlock, bool domIsSameRelop); bool optReachable(BasicBlock* const fromBlock, BasicBlock* const toBlock, BasicBlock* const excludedBlock); /************************************************************************** * Value/Assertion propagation *************************************************************************/ public: // Data structures for assertion prop BitVecTraits* apTraits; ASSERT_TP apFull; enum optAssertionKind { OAK_INVALID, OAK_EQUAL, OAK_NOT_EQUAL, OAK_SUBRANGE, OAK_NO_THROW, OAK_COUNT }; enum optOp1Kind { O1K_INVALID, O1K_LCLVAR, O1K_ARR_BND, O1K_BOUND_OPER_BND, O1K_BOUND_LOOP_BND, O1K_CONSTANT_LOOP_BND, O1K_CONSTANT_LOOP_BND_UN, O1K_EXACT_TYPE, O1K_SUBTYPE, O1K_VALUE_NUMBER, O1K_COUNT }; enum optOp2Kind { O2K_INVALID, O2K_LCLVAR_COPY, O2K_IND_CNS_INT, O2K_CONST_INT, O2K_CONST_LONG, O2K_CONST_DOUBLE, O2K_ZEROOBJ, O2K_SUBRANGE, O2K_COUNT }; struct AssertionDsc { optAssertionKind assertionKind; struct SsaVar { unsigned lclNum; // assigned to or property of this local var number unsigned ssaNum; }; struct ArrBnd { ValueNum vnIdx; ValueNum vnLen; }; struct AssertionDscOp1 { optOp1Kind kind; // a normal LclVar, or Exact-type or Subtype ValueNum vn; union { SsaVar lcl; ArrBnd bnd; }; } op1; struct AssertionDscOp2 { optOp2Kind kind; // a const or copy assignment ValueNum vn; struct IntVal { ssize_t iconVal; // integer #if !defined(HOST_64BIT) unsigned padding; // unused; ensures iconFlags does not overlap lconVal #endif GenTreeFlags iconFlags; // gtFlags }; union { struct { SsaVar lcl; FieldSeqNode* zeroOffsetFieldSeq; }; IntVal u1; __int64 lconVal; double dconVal; IntegralRange u2; }; } op2; bool IsCheckedBoundArithBound() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && op1.kind == O1K_BOUND_OPER_BND); } bool IsCheckedBoundBound() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && op1.kind == O1K_BOUND_LOOP_BND); } bool IsConstantBound() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && (op1.kind == O1K_CONSTANT_LOOP_BND)); } 
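        // Illustrative example (a sketch, not produced by the code in this header): for a branch
        // such as "if (V03 == 7)", the taken edge is typically summarized by an assertion shaped
        // roughly as follows; the field names are the AssertionDsc members declared above, while
        // the concrete local number and constant are hypothetical.
        //
        //     AssertionDsc dsc;
        //     dsc.assertionKind  = OAK_EQUAL;
        //     dsc.op1.kind       = O1K_LCLVAR;    // the local being tested (V03)
        //     dsc.op1.lcl.lclNum = 3;
        //     dsc.op2.kind       = O2K_CONST_INT; // the constant on the other side of the compare
        //     dsc.op2.u1.iconVal = 7;
        //
        // The fall-through edge gets the complementary OAK_NOT_EQUAL form of the same assertion
        // (see ComplementaryKind below).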
bool IsConstantBoundUnsigned() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && (op1.kind == O1K_CONSTANT_LOOP_BND_UN)); } bool IsBoundsCheckNoThrow() { return ((assertionKind == OAK_NO_THROW) && (op1.kind == O1K_ARR_BND)); } bool IsCopyAssertion() { return ((assertionKind == OAK_EQUAL) && (op1.kind == O1K_LCLVAR) && (op2.kind == O2K_LCLVAR_COPY)); } bool IsConstantInt32Assertion() { return ((assertionKind == OAK_EQUAL) || (assertionKind == OAK_NOT_EQUAL)) && (op2.kind == O2K_CONST_INT); } static bool SameKind(AssertionDsc* a1, AssertionDsc* a2) { return a1->assertionKind == a2->assertionKind && a1->op1.kind == a2->op1.kind && a1->op2.kind == a2->op2.kind; } static bool ComplementaryKind(optAssertionKind kind, optAssertionKind kind2) { if (kind == OAK_EQUAL) { return kind2 == OAK_NOT_EQUAL; } else if (kind == OAK_NOT_EQUAL) { return kind2 == OAK_EQUAL; } return false; } bool HasSameOp1(AssertionDsc* that, bool vnBased) { if (op1.kind != that->op1.kind) { return false; } else if (op1.kind == O1K_ARR_BND) { assert(vnBased); return (op1.bnd.vnIdx == that->op1.bnd.vnIdx) && (op1.bnd.vnLen == that->op1.bnd.vnLen); } else { return ((vnBased && (op1.vn == that->op1.vn)) || (!vnBased && (op1.lcl.lclNum == that->op1.lcl.lclNum))); } } bool HasSameOp2(AssertionDsc* that, bool vnBased) { if (op2.kind != that->op2.kind) { return false; } switch (op2.kind) { case O2K_IND_CNS_INT: case O2K_CONST_INT: return ((op2.u1.iconVal == that->op2.u1.iconVal) && (op2.u1.iconFlags == that->op2.u1.iconFlags)); case O2K_CONST_LONG: return (op2.lconVal == that->op2.lconVal); case O2K_CONST_DOUBLE: // exact match because of positive and negative zero. return (memcmp(&op2.dconVal, &that->op2.dconVal, sizeof(double)) == 0); case O2K_ZEROOBJ: return true; case O2K_LCLVAR_COPY: return (op2.lcl.lclNum == that->op2.lcl.lclNum) && (!vnBased || op2.lcl.ssaNum == that->op2.lcl.ssaNum) && (op2.zeroOffsetFieldSeq == that->op2.zeroOffsetFieldSeq); case O2K_SUBRANGE: return op2.u2.Equals(that->op2.u2); case O2K_INVALID: // we will return false break; default: assert(!"Unexpected value for op2.kind in AssertionDsc."); break; } return false; } bool Complementary(AssertionDsc* that, bool vnBased) { return ComplementaryKind(assertionKind, that->assertionKind) && HasSameOp1(that, vnBased) && HasSameOp2(that, vnBased); } bool Equals(AssertionDsc* that, bool vnBased) { if (assertionKind != that->assertionKind) { return false; } else if (assertionKind == OAK_NO_THROW) { assert(op2.kind == O2K_INVALID); return HasSameOp1(that, vnBased); } else { return HasSameOp1(that, vnBased) && HasSameOp2(that, vnBased); } } }; protected: static fgWalkPreFn optAddCopiesCallback; static fgWalkPreFn optVNAssertionPropCurStmtVisitor; unsigned optAddCopyLclNum; GenTree* optAddCopyAsgnNode; bool optLocalAssertionProp; // indicates that we are performing local assertion prop bool optAssertionPropagated; // set to true if we modified the trees bool optAssertionPropagatedCurrentStmt; #ifdef DEBUG GenTree* optAssertionPropCurrentTree; #endif AssertionIndex* optComplementaryAssertionMap; JitExpandArray<ASSERT_TP>* optAssertionDep; // table that holds dependent assertions (assertions // using the value of a local var) for each local var AssertionDsc* optAssertionTabPrivate; // table that holds info about value assignments AssertionIndex optAssertionCount; // total number of assertions in the assertion table AssertionIndex optMaxAssertionCount; public: void optVnNonNullPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree); 
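    // A worked illustration of the assertion dependency table above (the assertion indices and
    // locals are hypothetical): if assertion #5 is "V03 == 7" and assertion #9 is the copy
    // assertion "V08 == V03", then both indices are recorded in the optAssertionDep entry for
    // V03. When V03 is assigned, only the assertions in that entry need to be invalidated,
    // rather than scanning the whole assertion table.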
fgWalkResult optVNConstantPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree); GenTree* optVNConstantPropOnJTrue(BasicBlock* block, GenTree* test); GenTree* optVNConstantPropOnTree(BasicBlock* block, GenTree* tree); GenTree* optExtractSideEffListFromConst(GenTree* tree); AssertionIndex GetAssertionCount() { return optAssertionCount; } ASSERT_TP* bbJtrueAssertionOut; typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, ASSERT_TP> ValueNumToAssertsMap; ValueNumToAssertsMap* optValueNumToAsserts; // Assertion prop helpers. ASSERT_TP& GetAssertionDep(unsigned lclNum); AssertionDsc* optGetAssertion(AssertionIndex assertIndex); void optAssertionInit(bool isLocalProp); void optAssertionTraitsInit(AssertionIndex assertionCount); void optAssertionReset(AssertionIndex limit); void optAssertionRemove(AssertionIndex index); // Assertion prop data flow functions. void optAssertionPropMain(); Statement* optVNAssertionPropCurStmt(BasicBlock* block, Statement* stmt); bool optIsTreeKnownIntValue(bool vnBased, GenTree* tree, ssize_t* pConstant, GenTreeFlags* pIconFlags); ASSERT_TP* optInitAssertionDataflowFlags(); ASSERT_TP* optComputeAssertionGen(); // Assertion Gen functions. void optAssertionGen(GenTree* tree); AssertionIndex optAssertionGenCast(GenTreeCast* cast); AssertionIndex optAssertionGenPhiDefn(GenTree* tree); AssertionInfo optCreateJTrueBoundsAssertion(GenTree* tree); AssertionInfo optAssertionGenJtrue(GenTree* tree); AssertionIndex optCreateJtrueAssertions(GenTree* op1, GenTree* op2, Compiler::optAssertionKind assertionKind, bool helperCallArgs = false); AssertionIndex optFindComplementary(AssertionIndex assertionIndex); void optMapComplementary(AssertionIndex assertionIndex, AssertionIndex index); // Assertion creation functions. AssertionIndex optCreateAssertion(GenTree* op1, GenTree* op2, optAssertionKind assertionKind, bool helperCallArgs = false); AssertionIndex optFinalizeCreatingAssertion(AssertionDsc* assertion); bool optTryExtractSubrangeAssertion(GenTree* source, IntegralRange* pRange); void optCreateComplementaryAssertion(AssertionIndex assertionIndex, GenTree* op1, GenTree* op2, bool helperCallArgs = false); bool optAssertionVnInvolvesNan(AssertionDsc* assertion); AssertionIndex optAddAssertion(AssertionDsc* assertion); void optAddVnAssertionMapping(ValueNum vn, AssertionIndex index); #ifdef DEBUG void optPrintVnAssertionMapping(); #endif ASSERT_TP optGetVnMappedAssertions(ValueNum vn); // Used for respective assertion propagations. AssertionIndex optAssertionIsSubrange(GenTree* tree, IntegralRange range, ASSERT_VALARG_TP assertions); AssertionIndex optAssertionIsSubtype(GenTree* tree, GenTree* methodTableArg, ASSERT_VALARG_TP assertions); AssertionIndex optAssertionIsNonNullInternal(GenTree* op, ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased)); bool optAssertionIsNonNull(GenTree* op, ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased) DEBUGARG(AssertionIndex* pIndex)); AssertionIndex optGlobalAssertionIsEqualOrNotEqual(ASSERT_VALARG_TP assertions, GenTree* op1, GenTree* op2); AssertionIndex optGlobalAssertionIsEqualOrNotEqualZero(ASSERT_VALARG_TP assertions, GenTree* op1); AssertionIndex optLocalAssertionIsEqualOrNotEqual( optOp1Kind op1Kind, unsigned lclNum, optOp2Kind op2Kind, ssize_t cnsVal, ASSERT_VALARG_TP assertions); // Assertion prop for lcl var functions. 
bool optAssertionProp_LclVarTypeCheck(GenTree* tree, LclVarDsc* lclVarDsc, LclVarDsc* copyVarDsc); GenTree* optCopyAssertionProp(AssertionDsc* curAssertion, GenTreeLclVarCommon* tree, Statement* stmt DEBUGARG(AssertionIndex index)); GenTree* optConstantAssertionProp(AssertionDsc* curAssertion, GenTreeLclVarCommon* tree, Statement* stmt DEBUGARG(AssertionIndex index)); bool optZeroObjAssertionProp(GenTree* tree, ASSERT_VALARG_TP assertions); // Assertion propagation functions. GenTree* optAssertionProp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt, BasicBlock* block); GenTree* optAssertionProp_LclVar(ASSERT_VALARG_TP assertions, GenTreeLclVarCommon* tree, Statement* stmt); GenTree* optAssertionProp_Asg(ASSERT_VALARG_TP assertions, GenTreeOp* asg, Statement* stmt); GenTree* optAssertionProp_Return(ASSERT_VALARG_TP assertions, GenTreeUnOp* ret, Statement* stmt); GenTree* optAssertionProp_Ind(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_Cast(ASSERT_VALARG_TP assertions, GenTreeCast* cast, Statement* stmt); GenTree* optAssertionProp_Call(ASSERT_VALARG_TP assertions, GenTreeCall* call, Statement* stmt); GenTree* optAssertionProp_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_Comma(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_BndsChk(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionPropGlobal_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionPropLocal_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_Update(GenTree* newTree, GenTree* tree, Statement* stmt); GenTree* optNonNullAssertionProp_Call(ASSERT_VALARG_TP assertions, GenTreeCall* call); // Implied assertion functions. 
void optImpliedAssertions(AssertionIndex assertionIndex, ASSERT_TP& activeAssertions); void optImpliedByTypeOfAssertions(ASSERT_TP& activeAssertions); void optImpliedByCopyAssertion(AssertionDsc* copyAssertion, AssertionDsc* depAssertion, ASSERT_TP& result); void optImpliedByConstAssertion(AssertionDsc* curAssertion, ASSERT_TP& result); #ifdef DEBUG void optPrintAssertion(AssertionDsc* newAssertion, AssertionIndex assertionIndex = 0); void optPrintAssertionIndex(AssertionIndex index); void optPrintAssertionIndices(ASSERT_TP assertions); void optDebugCheckAssertion(AssertionDsc* assertion); void optDebugCheckAssertions(AssertionIndex AssertionIndex); #endif static void optDumpAssertionIndices(const char* header, ASSERT_TP assertions, const char* footer = nullptr); static void optDumpAssertionIndices(ASSERT_TP assertions, const char* footer = nullptr); void optAddCopies(); /************************************************************************** * Range checks *************************************************************************/ public: struct LoopCloneVisitorInfo { LoopCloneContext* context; unsigned loopNum; Statement* stmt; LoopCloneVisitorInfo(LoopCloneContext* context, unsigned loopNum, Statement* stmt) : context(context), loopNum(loopNum), stmt(nullptr) { } }; bool optIsStackLocalInvariant(unsigned loopNum, unsigned lclNum); bool optExtractArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum); bool optReconstructArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum); bool optIdentifyLoopOptInfo(unsigned loopNum, LoopCloneContext* context); static fgWalkPreFn optCanOptimizeByLoopCloningVisitor; fgWalkResult optCanOptimizeByLoopCloning(GenTree* tree, LoopCloneVisitorInfo* info); bool optObtainLoopCloningOpts(LoopCloneContext* context); bool optIsLoopClonable(unsigned loopInd); bool optLoopCloningEnabled(); #ifdef DEBUG void optDebugLogLoopCloning(BasicBlock* block, Statement* insertBefore); #endif void optPerformStaticOptimizations(unsigned loopNum, LoopCloneContext* context DEBUGARG(bool fastPath)); bool optComputeDerefConditions(unsigned loopNum, LoopCloneContext* context); bool optDeriveLoopCloningConditions(unsigned loopNum, LoopCloneContext* context); BasicBlock* optInsertLoopChoiceConditions(LoopCloneContext* context, unsigned loopNum, BasicBlock* slowHead, BasicBlock* insertAfter); protected: ssize_t optGetArrayRefScaleAndIndex(GenTree* mul, GenTree** pIndex DEBUGARG(bool bRngChk)); bool optReachWithoutCall(BasicBlock* srcBB, BasicBlock* dstBB); protected: bool optLoopsMarked; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX RegAlloc XX XX XX XX Does the register allocation and puts the remaining lclVars on the stack XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: regNumber raUpdateRegStateForArg(RegState* regState, LclVarDsc* argDsc); void raMarkStkVars(); #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE #if defined(TARGET_AMD64) static bool varTypeNeedsPartialCalleeSave(var_types type) { assert(type != TYP_STRUCT); return (type == TYP_SIMD32); } #elif defined(TARGET_ARM64) static bool varTypeNeedsPartialCalleeSave(var_types type) { assert(type != TYP_STRUCT); // ARM64 ABI FP Callee save registers only require Callee to save lower 8 Bytes // For SIMD types longer than 8 bytes Caller is responsible for saving and restoring 
        // Upper bytes.
        return ((type == TYP_SIMD16) || (type == TYP_SIMD12));
    }
#else  // !defined(TARGET_AMD64) && !defined(TARGET_ARM64)
#error("Unknown target architecture for FEATURE_SIMD")
#endif // !defined(TARGET_AMD64) && !defined(TARGET_ARM64)
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE

protected:
// Some things are used by both LSRA and regpredict allocators.

FrameType rpFrameType;
bool      rpMustCreateEBPCalled; // Set to true after we have called rpMustCreateEBPFrame once

bool rpMustCreateEBPFrame(INDEBUG(const char** wbReason));

private:
Lowering*            m_pLowering;   // Lowering; needed to Lower IR that's added or modified after Lowering.
LinearScanInterface* m_pLinearScan; // Linear Scan allocator

/* raIsVarargsStackArg is called by raMaskStkVars and by lvaComputeRefCounts.
   It identifies the special case where a varargs function has a parameter
   passed on the stack, other than the special varargs handle. Such parameters
   require special treatment, because they cannot be tracked by the GC (their
   offsets in the stack are not known at compile time).
*/
bool raIsVarargsStackArg(unsigned lclNum)
{
#ifdef TARGET_X86

    LclVarDsc* varDsc = lvaGetDesc(lclNum);

    assert(varDsc->lvIsParam);

    return (info.compIsVarArgs && !varDsc->lvIsRegArg && (lclNum != lvaVarargsHandleArg));

#else // TARGET_X86

    return false;

#endif // TARGET_X86
}

/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                           EEInterface                                     XX
XX                                                                           XX
XX   Get to the class and method info from the Execution Engine given       XX
XX   tokens for the class and method                                        XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

public:
// Get handles

void eeGetCallInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken,
                   CORINFO_RESOLVED_TOKEN* pConstrainedToken,
                   CORINFO_CALLINFO_FLAGS  flags,
                   CORINFO_CALL_INFO*      pResult);

void eeGetFieldInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS flags, CORINFO_FIELD_INFO* pResult);

// Get the flags

bool eeIsValueClass(CORINFO_CLASS_HANDLE clsHnd);
bool eeIsIntrinsic(CORINFO_METHOD_HANDLE ftn);
bool eeIsFieldStatic(CORINFO_FIELD_HANDLE fldHnd);

var_types eeGetFieldType(CORINFO_FIELD_HANDLE fldHnd, CORINFO_CLASS_HANDLE* pStructHnd = nullptr);

#if defined(DEBUG) || defined(FEATURE_JIT_METHOD_PERF) || defined(FEATURE_SIMD) || defined(TRACK_LSRA_STATS)
const char* eeGetMethodName(CORINFO_METHOD_HANDLE hnd, const char** className);
const char* eeGetMethodFullName(CORINFO_METHOD_HANDLE hnd);
unsigned compMethodHash(CORINFO_METHOD_HANDLE methodHandle);

bool eeIsNativeMethod(CORINFO_METHOD_HANDLE method);
CORINFO_METHOD_HANDLE eeGetMethodHandleForNative(CORINFO_METHOD_HANDLE method);
#endif

var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig);
var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig, bool* isPinned);
CORINFO_CLASS_HANDLE eeGetArgClass(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE list);
CORINFO_CLASS_HANDLE eeGetClassFromContext(CORINFO_CONTEXT_HANDLE context);
unsigned eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig);
static unsigned eeGetArgSizeAlignment(var_types type, bool isFloatHfa);

// VOM info, method sigs

void eeGetSig(unsigned               sigTok,
              CORINFO_MODULE_HANDLE  scope,
              CORINFO_CONTEXT_HANDLE context,
              CORINFO_SIG_INFO*      retSig);

void eeGetCallSiteSig(unsigned               sigTok,
                      CORINFO_MODULE_HANDLE  scope,
                      CORINFO_CONTEXT_HANDLE context,
CORINFO_SIG_INFO* retSig); void eeGetMethodSig(CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* retSig, CORINFO_CLASS_HANDLE owner = nullptr); // Method entry-points, instrs CORINFO_METHOD_HANDLE eeMarkNativeTarget(CORINFO_METHOD_HANDLE method); CORINFO_EE_INFO eeInfo; bool eeInfoInitialized; CORINFO_EE_INFO* eeGetEEInfo(); // Gets the offset of a SDArray's first element static unsigned eeGetArrayDataOffset(); // Get the offset of a MDArray's first element static unsigned eeGetMDArrayDataOffset(unsigned rank); // Get the offset of a MDArray's dimension length for a given dimension. static unsigned eeGetMDArrayLengthOffset(unsigned rank, unsigned dimension); // Get the offset of a MDArray's lower bound for a given dimension. static unsigned eeGetMDArrayLowerBoundOffset(unsigned rank, unsigned dimension); GenTree* eeGetPInvokeCookie(CORINFO_SIG_INFO* szMetaSig); // Returns the page size for the target machine as reported by the EE. target_size_t eeGetPageSize() { return (target_size_t)eeGetEEInfo()->osPageSize; } //------------------------------------------------------------------------ // VirtualStubParam: virtual stub dispatch extra parameter (slot address). // // It represents Abi and target specific registers for the parameter. // class VirtualStubParamInfo { public: VirtualStubParamInfo(bool isCoreRTABI) { #if defined(TARGET_X86) reg = REG_EAX; regMask = RBM_EAX; #elif defined(TARGET_AMD64) if (isCoreRTABI) { reg = REG_R10; regMask = RBM_R10; } else { reg = REG_R11; regMask = RBM_R11; } #elif defined(TARGET_ARM) if (isCoreRTABI) { reg = REG_R12; regMask = RBM_R12; } else { reg = REG_R4; regMask = RBM_R4; } #elif defined(TARGET_ARM64) reg = REG_R11; regMask = RBM_R11; #else #error Unsupported or unset target architecture #endif } regNumber GetReg() const { return reg; } _regMask_enum GetRegMask() const { return regMask; } private: regNumber reg; _regMask_enum regMask; }; VirtualStubParamInfo* virtualStubParamInfo; bool IsTargetAbi(CORINFO_RUNTIME_ABI abi) { return eeGetEEInfo()->targetAbi == abi; } bool generateCFIUnwindCodes() { #if defined(FEATURE_CFI_SUPPORT) return TargetOS::IsUnix && IsTargetAbi(CORINFO_CORERT_ABI); #else return false; #endif } // Debugging support - Line number info void eeGetStmtOffsets(); unsigned eeBoundariesCount; ICorDebugInfo::OffsetMapping* eeBoundaries; // Boundaries to report to the EE void eeSetLIcount(unsigned count); void eeSetLIinfo(unsigned which, UNATIVE_OFFSET offs, IPmappingDscKind kind, const ILLocation& loc); void eeSetLIdone(); #ifdef DEBUG static void eeDispILOffs(IL_OFFSET offs); static void eeDispSourceMappingOffs(uint32_t offs); static void eeDispLineInfo(const ICorDebugInfo::OffsetMapping* line); void eeDispLineInfos(); #endif // DEBUG // Debugging support - Local var info void eeGetVars(); unsigned eeVarsCount; struct VarResultInfo { UNATIVE_OFFSET startOffset; UNATIVE_OFFSET endOffset; DWORD varNumber; CodeGenInterface::siVarLoc loc; } * eeVars; void eeSetLVcount(unsigned count); void eeSetLVinfo(unsigned which, UNATIVE_OFFSET startOffs, UNATIVE_OFFSET length, unsigned varNum, const CodeGenInterface::siVarLoc& loc); void eeSetLVdone(); #ifdef DEBUG void eeDispVar(ICorDebugInfo::NativeVarInfo* var); void eeDispVars(CORINFO_METHOD_HANDLE ftn, ULONG32 cVars, ICorDebugInfo::NativeVarInfo* vars); #endif // DEBUG // ICorJitInfo wrappers void eeReserveUnwindInfo(bool isFunclet, bool isColdCode, ULONG unwindSize); void eeAllocUnwindInfo(BYTE* pHotCode, BYTE* pColdCode, ULONG startOffset, ULONG endOffset, ULONG unwindSize, BYTE* pUnwindBlock, 
CorJitFuncKind funcKind); void eeSetEHcount(unsigned cEH); void eeSetEHinfo(unsigned EHnumber, const CORINFO_EH_CLAUSE* clause); WORD eeGetRelocTypeHint(void* target); // ICorStaticInfo wrapper functions bool eeTryResolveToken(CORINFO_RESOLVED_TOKEN* resolvedToken); #if defined(UNIX_AMD64_ABI) #ifdef DEBUG static void dumpSystemVClassificationType(SystemVClassificationType ct); #endif // DEBUG void eeGetSystemVAmd64PassStructInRegisterDescriptor( /*IN*/ CORINFO_CLASS_HANDLE structHnd, /*OUT*/ SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structPassInRegDescPtr); #endif // UNIX_AMD64_ABI template <typename ParamType> bool eeRunWithErrorTrap(void (*function)(ParamType*), ParamType* param) { return eeRunWithErrorTrapImp(reinterpret_cast<void (*)(void*)>(function), reinterpret_cast<void*>(param)); } bool eeRunWithErrorTrapImp(void (*function)(void*), void* param); template <typename ParamType> bool eeRunWithSPMIErrorTrap(void (*function)(ParamType*), ParamType* param) { return eeRunWithSPMIErrorTrapImp(reinterpret_cast<void (*)(void*)>(function), reinterpret_cast<void*>(param)); } bool eeRunWithSPMIErrorTrapImp(void (*function)(void*), void* param); // Utility functions const char* eeGetFieldName(CORINFO_FIELD_HANDLE fieldHnd, const char** classNamePtr = nullptr); #if defined(DEBUG) const WCHAR* eeGetCPString(size_t stringHandle); #endif const char* eeGetClassName(CORINFO_CLASS_HANDLE clsHnd); static CORINFO_METHOD_HANDLE eeFindHelper(unsigned helper); static CorInfoHelpFunc eeGetHelperNum(CORINFO_METHOD_HANDLE method); static bool IsSharedStaticHelper(GenTree* tree); static bool IsGcSafePoint(GenTreeCall* call); static CORINFO_FIELD_HANDLE eeFindJitDataOffs(unsigned jitDataOffs); // returns true/false if 'field' is a Jit Data offset static bool eeIsJitDataOffs(CORINFO_FIELD_HANDLE field); // returns a number < 0 if 'field' is not a Jit Data offset, otherwise the data offset (limited to 2GB) static int eeGetJitDataOffs(CORINFO_FIELD_HANDLE field); /*****************************************************************************/ /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX CodeGenerator XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: CodeGenInterface* codeGen; // Record the instr offset mapping to the generated code jitstd::list<IPmappingDsc> genIPmappings; #ifdef DEBUG jitstd::list<PreciseIPMapping> genPreciseIPmappings; #endif // Managed RetVal - A side hash table meant to record the mapping from a // GT_CALL node to its debug info. This info is used to emit sequence points // that can be used by debugger to determine the native offset at which the // managed RetVal will be available. // // In fact we can store debug info in a GT_CALL node. This was ruled out in // favor of a side table for two reasons: 1) We need debug info for only those // GT_CALL nodes (created during importation) that correspond to an IL call and // whose return type is other than TYP_VOID. 2) GT_CALL node is a frequently used // structure and IL offset is needed only when generating debuggable code. Therefore // it is desirable to avoid memory size penalty in retail scenarios. 
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, DebugInfo> CallSiteDebugInfoTable; CallSiteDebugInfoTable* genCallSite2DebugInfoMap; unsigned genReturnLocal; // Local number for the return value when applicable. BasicBlock* genReturnBB; // jumped to when not optimizing for speed. // The following properties are part of CodeGenContext. Getters are provided here for // convenience and backward compatibility, but the properties can only be set by invoking // the setter on CodeGenContext directly. emitter* GetEmitter() const { return codeGen->GetEmitter(); } bool isFramePointerUsed() const { return codeGen->isFramePointerUsed(); } bool GetInterruptible() { return codeGen->GetInterruptible(); } void SetInterruptible(bool value) { codeGen->SetInterruptible(value); } #if DOUBLE_ALIGN const bool genDoubleAlign() { return codeGen->doDoubleAlign(); } DWORD getCanDoubleAlign(); bool shouldDoubleAlign(unsigned refCntStk, unsigned refCntReg, weight_t refCntWtdReg, unsigned refCntStkParam, weight_t refCntWtdStkDbl); #endif // DOUBLE_ALIGN bool IsFullPtrRegMapRequired() { return codeGen->IsFullPtrRegMapRequired(); } void SetFullPtrRegMapRequired(bool value) { codeGen->SetFullPtrRegMapRequired(value); } // Things that MAY belong either in CodeGen or CodeGenContext #if defined(FEATURE_EH_FUNCLETS) FuncInfoDsc* compFuncInfos; unsigned short compCurrFuncIdx; unsigned short compFuncInfoCount; unsigned short compFuncCount() { assert(fgFuncletsCreated); return compFuncInfoCount; } #else // !FEATURE_EH_FUNCLETS // This is a no-op when there are no funclets! void genUpdateCurrentFunclet(BasicBlock* block) { return; } FuncInfoDsc compFuncInfoRoot; static const unsigned compCurrFuncIdx = 0; unsigned short compFuncCount() { return 1; } #endif // !FEATURE_EH_FUNCLETS FuncInfoDsc* funCurrentFunc(); void funSetCurrentFunc(unsigned funcIdx); FuncInfoDsc* funGetFunc(unsigned funcIdx); unsigned int funGetFuncIdx(BasicBlock* block); // LIVENESS VARSET_TP compCurLife; // current live variables GenTree* compCurLifeTree; // node after which compCurLife has been computed // Compare the given "newLife" with last set of live variables and update // codeGen "gcInfo", siScopes, "regSet" with the new variable's homes/liveness. template <bool ForCodeGen> void compChangeLife(VARSET_VALARG_TP newLife); // Update the GC's masks, register's masks and reports change on variable's homes given a set of // current live variables if changes have happened since "compCurLife". template <bool ForCodeGen> inline void compUpdateLife(VARSET_VALARG_TP newLife); // Gets a register mask that represent the kill set for a helper call since // not all JIT Helper calls follow the standard ABI on the target architecture. regMaskTP compHelperCallKillSet(CorInfoHelpFunc helper); #ifdef TARGET_ARM // Requires that "varDsc" be a promoted struct local variable being passed as an argument, beginning at // "firstArgRegNum", which is assumed to have already been aligned to the register alignment restriction of the // struct type. Adds bits to "*pArgSkippedRegMask" for any argument registers *not* used in passing "varDsc" -- // i.e., internal "holes" caused by internal alignment constraints. For example, if the struct contained an int and // a double, and we at R0 (on ARM), then R1 would be skipped, and the bit for R1 would be added to the mask. 
void fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc, unsigned firstArgRegNum, regMaskTP* pArgSkippedRegMask); #endif // TARGET_ARM // If "tree" is a indirection (GT_IND, or GT_OBJ) whose arg is an ADDR, whose arg is a LCL_VAR, return that LCL_VAR // node, else NULL. static GenTreeLclVar* fgIsIndirOfAddrOfLocal(GenTree* tree); // This map is indexed by GT_OBJ nodes that are address of promoted struct variables, which // have been annotated with the GTF_VAR_DEATH flag. If such a node is *not* mapped in this // table, one may assume that all the (tracked) field vars die at this GT_OBJ. Otherwise, // the node maps to a pointer to a VARSET_TP, containing set bits for each of the tracked field // vars of the promoted struct local that go dead at the given node (the set bits are the bits // for the tracked var indices of the field vars, as in a live var set). // // The map is allocated on demand so all map operations should use one of the following three // wrapper methods. NodeToVarsetPtrMap* m_promotedStructDeathVars; NodeToVarsetPtrMap* GetPromotedStructDeathVars() { if (m_promotedStructDeathVars == nullptr) { m_promotedStructDeathVars = new (getAllocator()) NodeToVarsetPtrMap(getAllocator()); } return m_promotedStructDeathVars; } void ClearPromotedStructDeathVars() { if (m_promotedStructDeathVars != nullptr) { m_promotedStructDeathVars->RemoveAll(); } } bool LookupPromotedStructDeathVars(GenTree* tree, VARSET_TP** bits) { *bits = nullptr; bool result = false; if (m_promotedStructDeathVars != nullptr) { result = m_promotedStructDeathVars->Lookup(tree, bits); } return result; } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX UnwindInfo XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #if !defined(__GNUC__) #pragma region Unwind information #endif public: // // Infrastructure functions: start/stop/reserve/emit. // void unwindBegProlog(); void unwindEndProlog(); void unwindBegEpilog(); void unwindEndEpilog(); void unwindReserve(); void unwindEmit(void* pHotCode, void* pColdCode); // // Specific unwind information functions: called by code generation to indicate a particular // prolog or epilog unwindable instruction has been generated. // void unwindPush(regNumber reg); void unwindAllocStack(unsigned size); void unwindSetFrameReg(regNumber reg, unsigned offset); void unwindSaveReg(regNumber reg, unsigned offset); #if defined(TARGET_ARM) void unwindPushMaskInt(regMaskTP mask); void unwindPushMaskFloat(regMaskTP mask); void unwindPopMaskInt(regMaskTP mask); void unwindPopMaskFloat(regMaskTP mask); void unwindBranch16(); // The epilog terminates with a 16-bit branch (e.g., "bx lr") void unwindNop(unsigned codeSizeInBytes); // Generate unwind NOP code. 'codeSizeInBytes' is 2 or 4 bytes. Only // called via unwindPadding(). void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last // instruction and the current location. #endif // TARGET_ARM #if defined(TARGET_ARM64) void unwindNop(); void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last // instruction and the current location. void unwindSaveReg(regNumber reg, int offset); // str reg, [sp, #offset] void unwindSaveRegPreindexed(regNumber reg, int offset); // str reg, [sp, #offset]! 
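    // Illustrative sketch (the prolog shown is hypothetical, not emitted by this header): for an
    // ARM64 prolog such as
    //
    //     stp fp, lr, [sp, #-16]!
    //     mov fp, sp
    //
    // codegen would typically report the matching unwind codes as it emits each instruction,
    // roughly as
    //
    //     unwindSaveRegPairPreindexed(REG_FP, REG_LR, -16); // declared just below
    //     unwindSetFrameReg(REG_FPBASE, 0);
    //
    // so that the unwinder can undo the prolog from any instruction boundary within it.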
void unwindSaveRegPair(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset] void unwindSaveRegPairPreindexed(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset]! void unwindSaveNext(); // unwind code: save_next void unwindReturn(regNumber reg); // ret lr #endif // defined(TARGET_ARM64) // // Private "helper" functions for the unwind implementation. // private: #if defined(FEATURE_EH_FUNCLETS) void unwindGetFuncLocations(FuncInfoDsc* func, bool getHotSectionData, /* OUT */ emitLocation** ppStartLoc, /* OUT */ emitLocation** ppEndLoc); #endif // FEATURE_EH_FUNCLETS void unwindReserveFunc(FuncInfoDsc* func); void unwindEmitFunc(FuncInfoDsc* func, void* pHotCode, void* pColdCode); #if defined(TARGET_AMD64) || (defined(TARGET_X86) && defined(FEATURE_EH_FUNCLETS)) void unwindReserveFuncHelper(FuncInfoDsc* func, bool isHotCode); void unwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode, void* pColdCode, bool isHotCode); #endif // TARGET_AMD64 || (TARGET_X86 && FEATURE_EH_FUNCLETS) UNATIVE_OFFSET unwindGetCurrentOffset(FuncInfoDsc* func); #if defined(TARGET_AMD64) void unwindBegPrologWindows(); void unwindPushWindows(regNumber reg); void unwindAllocStackWindows(unsigned size); void unwindSetFrameRegWindows(regNumber reg, unsigned offset); void unwindSaveRegWindows(regNumber reg, unsigned offset); #ifdef UNIX_AMD64_ABI void unwindSaveRegCFI(regNumber reg, unsigned offset); #endif // UNIX_AMD64_ABI #elif defined(TARGET_ARM) void unwindPushPopMaskInt(regMaskTP mask, bool useOpsize16); void unwindPushPopMaskFloat(regMaskTP mask); #endif // TARGET_ARM #if defined(FEATURE_CFI_SUPPORT) short mapRegNumToDwarfReg(regNumber reg); void createCfiCode(FuncInfoDsc* func, UNATIVE_OFFSET codeOffset, UCHAR opcode, short dwarfReg, INT offset = 0); void unwindPushPopCFI(regNumber reg); void unwindBegPrologCFI(); void unwindPushPopMaskCFI(regMaskTP regMask, bool isFloat); void unwindAllocStackCFI(unsigned size); void unwindSetFrameRegCFI(regNumber reg, unsigned offset); void unwindEmitFuncCFI(FuncInfoDsc* func, void* pHotCode, void* pColdCode); #ifdef DEBUG void DumpCfiInfo(bool isHotCode, UNATIVE_OFFSET startOffset, UNATIVE_OFFSET endOffset, DWORD cfiCodeBytes, const CFI_CODE* const pCfiCode); #endif #endif // FEATURE_CFI_SUPPORT #if !defined(__GNUC__) #pragma endregion // Note: region is NOT under !defined(__GNUC__) #endif /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX SIMD XX XX XX XX Info about SIMD types, methods and the SIMD assembly (i.e. the assembly XX XX that contains the distinguished, well-known SIMD type definitions). 
XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ bool IsBaselineSimdIsaSupported() { #ifdef FEATURE_SIMD #if defined(TARGET_XARCH) CORINFO_InstructionSet minimumIsa = InstructionSet_SSE2; #elif defined(TARGET_ARM64) CORINFO_InstructionSet minimumIsa = InstructionSet_AdvSimd; #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 return compOpportunisticallyDependsOn(minimumIsa); #else return false; #endif } #if defined(DEBUG) bool IsBaselineSimdIsaSupportedDebugOnly() { #ifdef FEATURE_SIMD #if defined(TARGET_XARCH) CORINFO_InstructionSet minimumIsa = InstructionSet_SSE2; #elif defined(TARGET_ARM64) CORINFO_InstructionSet minimumIsa = InstructionSet_AdvSimd; #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 return compIsaSupportedDebugOnly(minimumIsa); #else return false; #endif // FEATURE_SIMD } #endif // DEBUG // Get highest available level for SIMD codegen SIMDLevel getSIMDSupportLevel() { #if defined(TARGET_XARCH) if (compOpportunisticallyDependsOn(InstructionSet_AVX2)) { return SIMD_AVX2_Supported; } if (compOpportunisticallyDependsOn(InstructionSet_SSE42)) { return SIMD_SSE4_Supported; } // min bar is SSE2 return SIMD_SSE2_Supported; #else assert(!"Available instruction set(s) for SIMD codegen is not defined for target arch"); unreached(); return SIMD_Not_Supported; #endif } bool isIntrinsicType(CORINFO_CLASS_HANDLE clsHnd) { return info.compCompHnd->isIntrinsicType(clsHnd); } const char* getClassNameFromMetadata(CORINFO_CLASS_HANDLE cls, const char** namespaceName) { return info.compCompHnd->getClassNameFromMetadata(cls, namespaceName); } CORINFO_CLASS_HANDLE getTypeInstantiationArgument(CORINFO_CLASS_HANDLE cls, unsigned index) { return info.compCompHnd->getTypeInstantiationArgument(cls, index); } #ifdef FEATURE_SIMD // Have we identified any SIMD types? // This is currently used by struct promotion to avoid getting type information for a struct // field to see if it is a SIMD type, if we haven't seen any SIMD types or operations in // the method. bool _usesSIMDTypes; bool usesSIMDTypes() { return _usesSIMDTypes; } void setUsesSIMDTypes(bool value) { _usesSIMDTypes = value; } // This is a temp lclVar allocated on the stack as TYP_SIMD. It is used to implement intrinsics // that require indexed access to the individual fields of the vector, which is not well supported // by the hardware. It is allocated when/if such situations are encountered during Lowering. 
unsigned lvaSIMDInitTempVarNum; struct SIMDHandlesCache { // SIMD Types CORINFO_CLASS_HANDLE SIMDFloatHandle; CORINFO_CLASS_HANDLE SIMDDoubleHandle; CORINFO_CLASS_HANDLE SIMDIntHandle; CORINFO_CLASS_HANDLE SIMDUShortHandle; CORINFO_CLASS_HANDLE SIMDUByteHandle; CORINFO_CLASS_HANDLE SIMDShortHandle; CORINFO_CLASS_HANDLE SIMDByteHandle; CORINFO_CLASS_HANDLE SIMDLongHandle; CORINFO_CLASS_HANDLE SIMDUIntHandle; CORINFO_CLASS_HANDLE SIMDULongHandle; CORINFO_CLASS_HANDLE SIMDNIntHandle; CORINFO_CLASS_HANDLE SIMDNUIntHandle; CORINFO_CLASS_HANDLE SIMDVector2Handle; CORINFO_CLASS_HANDLE SIMDVector3Handle; CORINFO_CLASS_HANDLE SIMDVector4Handle; CORINFO_CLASS_HANDLE SIMDVectorHandle; #ifdef FEATURE_HW_INTRINSICS #if defined(TARGET_ARM64) CORINFO_CLASS_HANDLE Vector64FloatHandle; CORINFO_CLASS_HANDLE Vector64DoubleHandle; CORINFO_CLASS_HANDLE Vector64IntHandle; CORINFO_CLASS_HANDLE Vector64UShortHandle; CORINFO_CLASS_HANDLE Vector64UByteHandle; CORINFO_CLASS_HANDLE Vector64ShortHandle; CORINFO_CLASS_HANDLE Vector64ByteHandle; CORINFO_CLASS_HANDLE Vector64LongHandle; CORINFO_CLASS_HANDLE Vector64UIntHandle; CORINFO_CLASS_HANDLE Vector64ULongHandle; CORINFO_CLASS_HANDLE Vector64NIntHandle; CORINFO_CLASS_HANDLE Vector64NUIntHandle; #endif // defined(TARGET_ARM64) CORINFO_CLASS_HANDLE Vector128FloatHandle; CORINFO_CLASS_HANDLE Vector128DoubleHandle; CORINFO_CLASS_HANDLE Vector128IntHandle; CORINFO_CLASS_HANDLE Vector128UShortHandle; CORINFO_CLASS_HANDLE Vector128UByteHandle; CORINFO_CLASS_HANDLE Vector128ShortHandle; CORINFO_CLASS_HANDLE Vector128ByteHandle; CORINFO_CLASS_HANDLE Vector128LongHandle; CORINFO_CLASS_HANDLE Vector128UIntHandle; CORINFO_CLASS_HANDLE Vector128ULongHandle; CORINFO_CLASS_HANDLE Vector128NIntHandle; CORINFO_CLASS_HANDLE Vector128NUIntHandle; #if defined(TARGET_XARCH) CORINFO_CLASS_HANDLE Vector256FloatHandle; CORINFO_CLASS_HANDLE Vector256DoubleHandle; CORINFO_CLASS_HANDLE Vector256IntHandle; CORINFO_CLASS_HANDLE Vector256UShortHandle; CORINFO_CLASS_HANDLE Vector256UByteHandle; CORINFO_CLASS_HANDLE Vector256ShortHandle; CORINFO_CLASS_HANDLE Vector256ByteHandle; CORINFO_CLASS_HANDLE Vector256LongHandle; CORINFO_CLASS_HANDLE Vector256UIntHandle; CORINFO_CLASS_HANDLE Vector256ULongHandle; CORINFO_CLASS_HANDLE Vector256NIntHandle; CORINFO_CLASS_HANDLE Vector256NUIntHandle; #endif // defined(TARGET_XARCH) #endif // FEATURE_HW_INTRINSICS SIMDHandlesCache() { memset(this, 0, sizeof(*this)); } }; SIMDHandlesCache* m_simdHandleCache; // Get an appropriate "zero" for the given type and class handle. GenTree* gtGetSIMDZero(var_types simdType, CorInfoType simdBaseJitType, CORINFO_CLASS_HANDLE simdHandle); // Get the handle for a SIMD type. CORINFO_CLASS_HANDLE gtGetStructHandleForSIMD(var_types simdType, CorInfoType simdBaseJitType) { if (m_simdHandleCache == nullptr) { // This may happen if the JIT generates SIMD node on its own, without importing them. // Otherwise getBaseJitTypeAndSizeOfSIMDType should have created the cache. 
            return NO_CLASS_HANDLE;
        }

        if (simdBaseJitType == CORINFO_TYPE_FLOAT)
        {
            switch (simdType)
            {
                case TYP_SIMD8:
                    return m_simdHandleCache->SIMDVector2Handle;
                case TYP_SIMD12:
                    return m_simdHandleCache->SIMDVector3Handle;
                case TYP_SIMD16:
                    if ((getSIMDVectorType() == TYP_SIMD32) ||
                        (m_simdHandleCache->SIMDVector4Handle != NO_CLASS_HANDLE))
                    {
                        return m_simdHandleCache->SIMDVector4Handle;
                    }
                    break;
                case TYP_SIMD32:
                    break;
                default:
                    unreached();
            }
        }
        assert(emitTypeSize(simdType) <= largestEnregisterableStructSize());
        switch (simdBaseJitType)
        {
            case CORINFO_TYPE_FLOAT:
                return m_simdHandleCache->SIMDFloatHandle;
            case CORINFO_TYPE_DOUBLE:
                return m_simdHandleCache->SIMDDoubleHandle;
            case CORINFO_TYPE_INT:
                return m_simdHandleCache->SIMDIntHandle;
            case CORINFO_TYPE_USHORT:
                return m_simdHandleCache->SIMDUShortHandle;
            case CORINFO_TYPE_UBYTE:
                return m_simdHandleCache->SIMDUByteHandle;
            case CORINFO_TYPE_SHORT:
                return m_simdHandleCache->SIMDShortHandle;
            case CORINFO_TYPE_BYTE:
                return m_simdHandleCache->SIMDByteHandle;
            case CORINFO_TYPE_LONG:
                return m_simdHandleCache->SIMDLongHandle;
            case CORINFO_TYPE_UINT:
                return m_simdHandleCache->SIMDUIntHandle;
            case CORINFO_TYPE_ULONG:
                return m_simdHandleCache->SIMDULongHandle;
            case CORINFO_TYPE_NATIVEINT:
                return m_simdHandleCache->SIMDNIntHandle;
            case CORINFO_TYPE_NATIVEUINT:
                return m_simdHandleCache->SIMDNUIntHandle;
            default:
                assert(!"Didn't find a class handle for simdType");
        }

        return NO_CLASS_HANDLE;
    }

    // Returns true if this is a SIMD type that should be considered an opaque
    // vector type (i.e. do not analyze or promote its fields).
    // Note that all but the fixed vector types are opaque, even though they may
    // actually be declared as having fields.
    bool isOpaqueSIMDType(CORINFO_CLASS_HANDLE structHandle) const
    {
        return ((m_simdHandleCache != nullptr) && (structHandle != m_simdHandleCache->SIMDVector2Handle) &&
                (structHandle != m_simdHandleCache->SIMDVector3Handle) &&
                (structHandle != m_simdHandleCache->SIMDVector4Handle));
    }

    // Returns true if the tree corresponds to a TYP_SIMD lcl var.
    // Note that both SIMD vector args and locals are marked as lvSIMDType = true, but
    // the type of an arg node is TYP_BYREF and a local node is TYP_SIMD or TYP_STRUCT.
    bool isSIMDTypeLocal(GenTree* tree)
    {
        return tree->OperIsLocal() && lvaGetDesc(tree->AsLclVarCommon())->lvSIMDType;
    }

    // Returns true if the lclVar is an opaque SIMD type.
    bool isOpaqueSIMDLclVar(const LclVarDsc* varDsc) const
    {
        if (!varDsc->lvSIMDType)
        {
            return false;
        }

        return isOpaqueSIMDType(varDsc->GetStructHnd());
    }

    static bool isRelOpSIMDIntrinsic(SIMDIntrinsicID intrinsicId)
    {
        return (intrinsicId == SIMDIntrinsicEqual);
    }

    // Returns the base JIT type of a TYP_SIMD local.
    // Returns CORINFO_TYPE_UNDEF if the local is not TYP_SIMD.
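    // (For example, a local of type Vector128<float> would report CORINFO_TYPE_FLOAT here; the
    // value is simply the simd base JIT type recorded on the local - an illustrative example.)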
CorInfoType getBaseJitTypeOfSIMDLocal(GenTree* tree) { if (isSIMDTypeLocal(tree)) { return lvaGetDesc(tree->AsLclVarCommon())->GetSimdBaseJitType(); } return CORINFO_TYPE_UNDEF; } bool isSIMDClass(CORINFO_CLASS_HANDLE clsHnd) { if (isIntrinsicType(clsHnd)) { const char* namespaceName = nullptr; (void)getClassNameFromMetadata(clsHnd, &namespaceName); return strcmp(namespaceName, "System.Numerics") == 0; } return false; } bool isSIMDClass(typeInfo* pTypeInfo) { return pTypeInfo->IsStruct() && isSIMDClass(pTypeInfo->GetClassHandleForValueClass()); } bool isHWSIMDClass(CORINFO_CLASS_HANDLE clsHnd) { #ifdef FEATURE_HW_INTRINSICS if (isIntrinsicType(clsHnd)) { const char* namespaceName = nullptr; (void)getClassNameFromMetadata(clsHnd, &namespaceName); return strcmp(namespaceName, "System.Runtime.Intrinsics") == 0; } #endif // FEATURE_HW_INTRINSICS return false; } bool isHWSIMDClass(typeInfo* pTypeInfo) { #ifdef FEATURE_HW_INTRINSICS return pTypeInfo->IsStruct() && isHWSIMDClass(pTypeInfo->GetClassHandleForValueClass()); #else return false; #endif } bool isSIMDorHWSIMDClass(CORINFO_CLASS_HANDLE clsHnd) { return isSIMDClass(clsHnd) || isHWSIMDClass(clsHnd); } bool isSIMDorHWSIMDClass(typeInfo* pTypeInfo) { return isSIMDClass(pTypeInfo) || isHWSIMDClass(pTypeInfo); } // Get the base (element) type and size in bytes for a SIMD type. Returns CORINFO_TYPE_UNDEF // if it is not a SIMD type or is an unsupported base JIT type. CorInfoType getBaseJitTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, unsigned* sizeBytes = nullptr); CorInfoType getBaseJitTypeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd) { return getBaseJitTypeAndSizeOfSIMDType(typeHnd, nullptr); } // Get SIMD Intrinsic info given the method handle. // Also sets typeHnd, argCount, baseType and sizeBytes out params. const SIMDIntrinsicInfo* getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* typeHnd, CORINFO_METHOD_HANDLE methodHnd, CORINFO_SIG_INFO* sig, bool isNewObj, unsigned* argCount, CorInfoType* simdBaseJitType, unsigned* sizeBytes); // Pops and returns GenTree node from importers type stack. // Normalizes TYP_STRUCT value in case of GT_CALL, GT_RET_EXPR and arg nodes. GenTree* impSIMDPopStack(var_types type, bool expectAddr = false, CORINFO_CLASS_HANDLE structType = nullptr); // Transforms operands and returns the SIMD intrinsic to be applied on // transformed operands to obtain given relop result. SIMDIntrinsicID impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId, CORINFO_CLASS_HANDLE typeHnd, unsigned simdVectorSize, CorInfoType* inOutBaseJitType, GenTree** op1, GenTree** op2); #if defined(TARGET_XARCH) // Transforms operands and returns the SIMD intrinsic to be applied on // transformed operands to obtain == comparison result. SIMDIntrinsicID impSIMDLongRelOpEqual(CORINFO_CLASS_HANDLE typeHnd, unsigned simdVectorSize, GenTree** op1, GenTree** op2); #endif // defined(TARGET_XARCH) void setLclRelatedToSIMDIntrinsic(GenTree* tree); bool areFieldsContiguous(GenTree* op1, GenTree* op2); bool areLocalFieldsContiguous(GenTreeLclFld* first, GenTreeLclFld* second); bool areArrayElementsContiguous(GenTree* op1, GenTree* op2); bool areArgumentsContiguous(GenTree* op1, GenTree* op2); GenTree* createAddressNodeForSIMDInit(GenTree* tree, unsigned simdSize); // check methodHnd to see if it is a SIMD method that is expanded as an intrinsic in the JIT. 
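    // (For instance, a recognized Vector<float> constructor call may be expanded here into a SIMD
    // initialization node instead of remaining an ordinary call - an illustrative example.)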
GenTree* impSIMDIntrinsic(OPCODE opcode, GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, unsigned methodFlags, int memberRef); GenTree* getOp1ForConstructor(OPCODE opcode, GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd); // Whether SIMD vector occupies part of SIMD register. // SSE2: vector2f/3f are considered sub register SIMD types. // AVX: vector2f, 3f and 4f are all considered sub register SIMD types. bool isSubRegisterSIMDType(GenTreeSIMD* simdNode) { unsigned vectorRegisterByteLength; #if defined(TARGET_XARCH) // Calling the getSIMDVectorRegisterByteLength api causes the size of Vector<T> to be recorded // with the AOT compiler, so that it cannot change from aot compilation time to runtime // This api does not require such fixing as it merely pertains to the size of the simd type // relative to the Vector<T> size as used at compile time. (So detecting a vector length of 16 here // does not preclude the code from being used on a machine with a larger vector length.) if (getSIMDSupportLevel() < SIMD_AVX2_Supported) { vectorRegisterByteLength = 16; } else { vectorRegisterByteLength = 32; } #else vectorRegisterByteLength = getSIMDVectorRegisterByteLength(); #endif return (simdNode->GetSimdSize() < vectorRegisterByteLength); } // Get the type for the hardware SIMD vector. // This is the maximum SIMD type supported for this target. var_types getSIMDVectorType() { #if defined(TARGET_XARCH) if (getSIMDSupportLevel() == SIMD_AVX2_Supported) { return TYP_SIMD32; } else { // Verify and record that AVX2 isn't supported compVerifyInstructionSetUnusable(InstructionSet_AVX2); assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported); return TYP_SIMD16; } #elif defined(TARGET_ARM64) return TYP_SIMD16; #else assert(!"getSIMDVectorType() unimplemented on target arch"); unreached(); #endif } // Get the size of the SIMD type in bytes int getSIMDTypeSizeInBytes(CORINFO_CLASS_HANDLE typeHnd) { unsigned sizeBytes = 0; (void)getBaseJitTypeAndSizeOfSIMDType(typeHnd, &sizeBytes); return sizeBytes; } // Get the the number of elements of baseType of SIMD vector given by its size and baseType static int getSIMDVectorLength(unsigned simdSize, var_types baseType); // Get the the number of elements of baseType of SIMD vector given by its type handle int getSIMDVectorLength(CORINFO_CLASS_HANDLE typeHnd); // Get preferred alignment of SIMD type. int getSIMDTypeAlignment(var_types simdType); // Get the number of bytes in a System.Numeric.Vector<T> for the current compilation. // Note - cannot be used for System.Runtime.Intrinsic unsigned getSIMDVectorRegisterByteLength() { #if defined(TARGET_XARCH) if (getSIMDSupportLevel() == SIMD_AVX2_Supported) { return YMM_REGSIZE_BYTES; } else { // Verify and record that AVX2 isn't supported compVerifyInstructionSetUnusable(InstructionSet_AVX2); assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported); return XMM_REGSIZE_BYTES; } #elif defined(TARGET_ARM64) return FP_REGSIZE_BYTES; #else assert(!"getSIMDVectorRegisterByteLength() unimplemented on target arch"); unreached(); #endif } // The minimum and maximum possible number of bytes in a SIMD vector. 
    // maxSIMDStructBytes
    // The maximum SIMD size supported by System.Numeric.Vectors or System.Runtime.Intrinsic
    // SSE:  16-byte Vector<T> and Vector128<T>
    // AVX:  32-byte Vector256<T> (Vector<T> is 16-byte)
    // AVX2: 32-byte Vector<T> and Vector256<T>
    unsigned int maxSIMDStructBytes()
    {
#if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH)
        if (compOpportunisticallyDependsOn(InstructionSet_AVX))
        {
            return YMM_REGSIZE_BYTES;
        }
        else
        {
            // Verify and record that AVX2 isn't supported
            compVerifyInstructionSetUnusable(InstructionSet_AVX2);
            assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported);
            return XMM_REGSIZE_BYTES;
        }
#else
        return getSIMDVectorRegisterByteLength();
#endif
    }

    unsigned int minSIMDStructBytes()
    {
        return emitTypeSize(TYP_SIMD8);
    }

public:
    // Returns the codegen type for a given SIMD size.
    static var_types getSIMDTypeForSize(unsigned size)
    {
        var_types simdType = TYP_UNDEF;
        if (size == 8)
        {
            simdType = TYP_SIMD8;
        }
        else if (size == 12)
        {
            simdType = TYP_SIMD12;
        }
        else if (size == 16)
        {
            simdType = TYP_SIMD16;
        }
        else if (size == 32)
        {
            simdType = TYP_SIMD32;
        }
        else
        {
            noway_assert(!"Unexpected size for SIMD type");
        }
        return simdType;
    }

private:
    unsigned getSIMDInitTempVarNum(var_types simdType);

#else  // !FEATURE_SIMD
    bool isOpaqueSIMDLclVar(LclVarDsc* varDsc)
    {
        return false;
    }
#endif // FEATURE_SIMD

public:
    //------------------------------------------------------------------------
    // largestEnregisterableStruct: The size in bytes of the largest struct that can be enregistered.
    //
    // Notes: It is not guaranteed that a struct of this size or smaller WILL be a
    //        candidate for enregistration.

    unsigned largestEnregisterableStructSize()
    {
#ifdef FEATURE_SIMD
#if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH)
        if (opts.IsReadyToRun())
        {
            // Return a constant instead of calling maxSIMDStructBytes, as maxSIMDStructBytes performs
            // checks that are affected by the current level of instruction set support and would
            // otherwise cause the highest level of instruction set support to be reported to crossgen2.
            // This api is only ever used as an optimization or in asserts, so no reporting should
            // ever happen.
            return YMM_REGSIZE_BYTES;
        }
#endif // defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH)
        unsigned vectorRegSize = maxSIMDStructBytes();
        assert(vectorRegSize >= TARGET_POINTER_SIZE);
        return vectorRegSize;
#else  // !FEATURE_SIMD
        return TARGET_POINTER_SIZE;
#endif // !FEATURE_SIMD
    }

    // Used to determine if a struct *might* be a SIMD type. As this function only takes a size, many
    // structs will fit the criteria.
    bool structSizeMightRepresentSIMDType(size_t structSize)
    {
#ifdef FEATURE_SIMD
        // Do not use maxSIMDStructBytes as that api in R2R on X86 and X64 may notify the JIT
        // about the size of a struct under the assumption that the struct size needs to be recorded.
        // By using largestEnregisterableStructSize here, the detail of whether or not Vector256<T> is
        // enregistered or not will not be messaged to the R2R compiler.
        return (structSize >= minSIMDStructBytes()) && (structSize <= largestEnregisterableStructSize());
#else
        return false;
#endif // FEATURE_SIMD
    }

#ifdef FEATURE_SIMD
    static bool vnEncodesResultTypeForSIMDIntrinsic(SIMDIntrinsicID intrinsicId);
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
    static bool vnEncodesResultTypeForHWIntrinsic(NamedIntrinsic hwIntrinsicID);
#endif // FEATURE_HW_INTRINSICS

private:
    // These routines need not be enclosed under FEATURE_SIMD since lvIsSIMDType()
    // is defined for both FEATURE_SIMD and !FEATURE_SIMD appropriately. The use of these
    // routines also avoids the need for #ifdef FEATURE_SIMD specific code.
    // Is this var of type SIMD struct?
    bool lclVarIsSIMDType(unsigned varNum)
    {
        return lvaGetDesc(varNum)->lvIsSIMDType();
    }

    // Is this Local node a SIMD local?
    bool lclVarIsSIMDType(GenTreeLclVarCommon* lclVarTree)
    {
        return lclVarIsSIMDType(lclVarTree->GetLclNum());
    }

    // Returns true if the TYP_SIMD locals on stack are aligned at their
    // preferred byte boundary specified by getSIMDTypeAlignment().
    //
    // As per the Intel manual, the preferred alignment for AVX vectors is
    // 32 bytes. It is not clear whether the additional stack space used in
    // aligning the stack is worth the benefit, so for now we use 16-byte
    // alignment for AVX 256-bit vectors with unaligned load/stores to/from
    // memory. On x86, the stack frame is aligned to 4 bytes. We need to extend
    // existing support for double (8-byte) alignment to 16 or 32 byte
    // alignment for frames with local SIMD vars, if that is determined to be
    // profitable.
    //
    // On Amd64 and SysV, RSP+8 is aligned on entry to the function (before
    // prolog has run). This means that in RBP-based frames RBP will be 16-byte
    // aligned. For RSP-based frames these are only sometimes aligned, depending
    // on the frame size.
    //
    bool isSIMDTypeLocalAligned(unsigned varNum)
    {
#if defined(FEATURE_SIMD) && ALIGN_SIMD_TYPES
        if (lclVarIsSIMDType(varNum) && lvaTable[varNum].lvType != TYP_BYREF)
        {
            // TODO-Cleanup: Can't this use the lvExactSize on the varDsc?
            int alignment = getSIMDTypeAlignment(lvaTable[varNum].lvType);
            if (alignment <= STACK_ALIGN)
            {
                bool rbpBased;
                int  off = lvaFrameAddress(varNum, &rbpBased);
                // On SysV and Winx64 ABIs RSP+8 will be 16-byte aligned at the
                // first instruction of a function. If our frame is RBP based
                // then RBP will always be 16 bytes aligned, so we can simply
                // check the offset.
                if (rbpBased)
                {
                    return (off % alignment) == 0;
                }

                // For RSP-based frame the alignment of RSP depends on our
                // locals. rsp+8 is aligned on entry and we just subtract frame
                // size so it is not hard to compute. Note that the compiler
                // tries hard to make sure the frame size means RSP will be
                // 16-byte aligned, but for leaf functions without locals (i.e.
                // frameSize = 0) it will not be.
                int frameSize = codeGen->genTotalFrameSize();
                return ((8 - frameSize + off) % alignment) == 0;
            }
        }
#endif // FEATURE_SIMD

        return false;
    }

#ifdef DEBUG
    // Answer the question: Is a particular ISA supported?
    // Use this api when asking the question so that future
    // ISA questions can be asked correctly or when asserting
    // support/nonsupport for an instruction set.
    bool compIsaSupportedDebugOnly(CORINFO_InstructionSet isa) const
    {
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
        return (opts.compSupportsISA & (1ULL << isa)) != 0;
#else
        return false;
#endif
    }
#endif // DEBUG

    bool notifyInstructionSetUsage(CORINFO_InstructionSet isa, bool supported) const;

    // Answer the question: Is a particular ISA allowed to be used implicitly by optimizations?
    // The result of this api call will exactly match the target machine
    // on which the function is executed (except for CoreLib, where there are special rules)
    bool compExactlyDependsOn(CORINFO_InstructionSet isa) const
    {
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
        uint64_t isaBit = (1ULL << isa);
        if ((opts.compSupportsISAReported & isaBit) == 0)
        {
            if (notifyInstructionSetUsage(isa, (opts.compSupportsISA & isaBit) != 0))
                ((Compiler*)this)->opts.compSupportsISAExactly |= isaBit;
            ((Compiler*)this)->opts.compSupportsISAReported |= isaBit;
        }
        return (opts.compSupportsISAExactly & isaBit) != 0;
#else
        return false;
#endif
    }

    // Ensure that code will not execute if an instruction set is usable. Call only
    // if the instruction set has previously been reported as unusable, but when
    // that status has not yet been recorded to the AOT compiler.
    void compVerifyInstructionSetUnusable(CORINFO_InstructionSet isa)
    {
        // use compExactlyDependsOn to capture and record the use of the isa
        bool isaUsable = compExactlyDependsOn(isa);
        // Assert that the isa is unusable. If it is usable, this function should never be called.
        assert(!isaUsable);
    }

    // Answer the question: Is a particular ISA allowed to be used implicitly by optimizations?
    // The result of this api call will match the target machine if the result is true
    // If the result is false, then the target machine may have support for the instruction
    bool compOpportunisticallyDependsOn(CORINFO_InstructionSet isa) const
    {
        if ((opts.compSupportsISA & (1ULL << isa)) != 0)
        {
            return compExactlyDependsOn(isa);
        }
        else
        {
            return false;
        }
    }

    // Answer the question: Is a particular ISA supported for explicit hardware intrinsics?
    bool compHWIntrinsicDependsOn(CORINFO_InstructionSet isa) const
    {
        // Report intent to use the ISA to the EE
        compExactlyDependsOn(isa);
        return ((opts.compSupportsISA & (1ULL << isa)) != 0);
    }

    bool canUseVexEncoding() const
    {
#ifdef TARGET_XARCH
        return compOpportunisticallyDependsOn(InstructionSet_AVX);
#else
        return false;
#endif
    }

    /*
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XX                                                                           XX
    XX                           Compiler                                        XX
    XX                                                                           XX
    XX   Generic info about the compilation and the method being compiled.      XX
    XX   It is responsible for driving the other phases.                        XX
    XX   It is also responsible for all the memory management.                  XX
    XX                                                                           XX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    */

public:
    Compiler* InlineeCompiler; // The Compiler instance for the inlinee

    InlineResult* compInlineResult; // The result of importing the inlinee method.

    bool compDoAggressiveInlining; // If true, mark every method as CORINFO_FLG_FORCEINLINE
    bool compJmpOpUsed;            // Does the method do a JMP
    bool compLongUsed;             // Does the method use TYP_LONG
    bool compFloatingPointUsed;    // Does the method use TYP_FLOAT or TYP_DOUBLE
    bool compTailCallUsed;         // Does the method do a tailcall
    bool compTailPrefixSeen;       // Does the method IL have tail. prefix
    bool compLocallocSeen;         // Does the method IL have localloc opcode
    bool compLocallocUsed;         // Does the method use localloc.
    bool compLocallocOptimized;    // Does the method have an optimized localloc
    bool compQmarkUsed;            // Does the method use GT_QMARK/GT_COLON
    bool compQmarkRationalized;    // Is it allowed to use a GT_QMARK/GT_COLON node.
    bool compHasBackwardJump;      // Does the method (or some inlinee) have a lexically backwards jump?
bool compHasBackwardJumpInHandler; // Does the method have a lexically backwards jump in a handler? bool compSwitchedToOptimized; // Codegen initially was Tier0 but jit switched to FullOpts bool compSwitchedToMinOpts; // Codegen initially was Tier1/FullOpts but jit switched to MinOpts bool compSuppressedZeroInit; // There are vars with lvSuppressedZeroInit set // NOTE: These values are only reliable after // the importing is completely finished. #ifdef DEBUG // State information - which phases have completed? // These are kept together for easy discoverability bool bRangeAllowStress; bool compCodeGenDone; int64_t compNumStatementLinksTraversed; // # of links traversed while doing debug checks bool fgNormalizeEHDone; // Has the flowgraph EH normalization phase been done? size_t compSizeEstimate; // The estimated size of the method as per `gtSetEvalOrder`. size_t compCycleEstimate; // The estimated cycle count of the method as per `gtSetEvalOrder` #endif // DEBUG bool fgLocalVarLivenessDone; // Note that this one is used outside of debug. bool fgLocalVarLivenessChanged; bool compLSRADone; bool compRationalIRForm; bool compUsesThrowHelper; // There is a call to a THROW_HELPER for the compiled method. bool compGeneratingProlog; bool compGeneratingEpilog; bool compNeedsGSSecurityCookie; // There is an unsafe buffer (or localloc) on the stack. // Insert cookie on frame and code to check the cookie, like VC++ -GS. bool compGSReorderStackLayout; // There is an unsafe buffer on the stack, reorder locals and make local // copies of susceptible parameters to avoid buffer overrun attacks through locals/params bool getNeedsGSSecurityCookie() const { return compNeedsGSSecurityCookie; } void setNeedsGSSecurityCookie() { compNeedsGSSecurityCookie = true; } FrameLayoutState lvaDoneFrameLayout; // The highest frame layout state that we've completed. During // frame layout calculations, this is the level we are currently // computing. //---------------------------- JITing options ----------------------------- enum codeOptimize { BLENDED_CODE, SMALL_CODE, FAST_CODE, COUNT_OPT_CODE }; struct Options { JitFlags* jitFlags; // all flags passed from the EE // The instruction sets that the compiler is allowed to emit. uint64_t compSupportsISA; // The instruction sets that were reported to the VM as being used by the current method. Subset of // compSupportsISA. uint64_t compSupportsISAReported; // The instruction sets that the compiler is allowed to take advantage of implicitly during optimizations. // Subset of compSupportsISA. // The instruction sets available in compSupportsISA and not available in compSupportsISAExactly can be only // used via explicit hardware intrinsics. uint64_t compSupportsISAExactly; void setSupportedISAs(CORINFO_InstructionSetFlags isas) { compSupportsISA = isas.GetFlagsRaw(); } unsigned compFlags; // method attributes unsigned instrCount; unsigned lvRefCount; codeOptimize compCodeOpt; // what type of code optimizations bool compUseCMOV; // optimize maximally and/or favor speed over size? 
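        // Default thresholds (IL code size, instruction count, basic block count, local count and
        // local ref count) beyond which the JIT may decide to switch the method to MinOpts: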
#define DEFAULT_MIN_OPTS_CODE_SIZE 60000
#define DEFAULT_MIN_OPTS_INSTR_COUNT 20000
#define DEFAULT_MIN_OPTS_BB_COUNT 2000
#define DEFAULT_MIN_OPTS_LV_NUM_COUNT 2000
#define DEFAULT_MIN_OPTS_LV_REF_COUNT 8000

// Maximum number of locals before turning off the inlining
#define MAX_LV_NUM_COUNT_FOR_INLINING 512

        bool compMinOpts;
        bool compMinOptsIsSet;
#ifdef DEBUG
        mutable bool compMinOptsIsUsed;

        bool MinOpts() const
        {
            assert(compMinOptsIsSet);
            compMinOptsIsUsed = true;
            return compMinOpts;
        }
        bool IsMinOptsSet() const
        {
            return compMinOptsIsSet;
        }
#else  // !DEBUG
        bool MinOpts() const
        {
            return compMinOpts;
        }
        bool IsMinOptsSet() const
        {
            return compMinOptsIsSet;
        }
#endif // !DEBUG

        bool OptimizationDisabled() const
        {
            return MinOpts() || compDbgCode;
        }
        bool OptimizationEnabled() const
        {
            return !OptimizationDisabled();
        }

        void SetMinOpts(bool val)
        {
            assert(!compMinOptsIsUsed);
            assert(!compMinOptsIsSet || (compMinOpts == val));
            compMinOpts      = val;
            compMinOptsIsSet = true;
        }

        // true if the CLFLG_* for an optimization is set.
        bool OptEnabled(unsigned optFlag) const
        {
            return !!(compFlags & optFlag);
        }

#ifdef FEATURE_READYTORUN
        bool IsReadyToRun() const
        {
            return jitFlags->IsSet(JitFlags::JIT_FLAG_READYTORUN);
        }
#else
        bool IsReadyToRun() const
        {
            return false;
        }
#endif

        // Check if the compilation is control-flow guard enabled.
        bool IsCFGEnabled() const
        {
#if defined(TARGET_ARM64) || defined(TARGET_AMD64)
            // On these platforms we assume the register that the target is
            // passed in is preserved by the validator and take care to get the
            // target from the register for the call (even in debug mode).
            static_assert_no_msg((RBM_VALIDATE_INDIRECT_CALL_TRASH & (1 << REG_VALIDATE_INDIRECT_CALL_ADDR)) == 0);
            if (JitConfig.JitForceControlFlowGuard())
                return true;

            return jitFlags->IsSet(JitFlags::JIT_FLAG_ENABLE_CFG);
#else
            // The remaining platforms are not supported and would require some
            // work to support.
            //
            // ARM32:
            //   The ARM32 validator does not preserve any volatile registers
            //   which means we have to take special care to allocate and use a
            //   callee-saved register (reloading the target from memory is a
            //   security issue).
            //
            // x86:
            //   On x86 some VSD calls disassemble the call site and expect an
            //   indirect call which is fundamentally incompatible with CFG.
            //   This would require a different way to pass this information
            //   through.
            //
            return false;
#endif
        }

#ifdef FEATURE_ON_STACK_REPLACEMENT
        bool IsOSR() const
        {
            return jitFlags->IsSet(JitFlags::JIT_FLAG_OSR);
        }
#else
        bool IsOSR() const
        {
            return false;
        }
#endif

        // true if we should use the PINVOKE_{BEGIN,END} helpers instead of generating
        // PInvoke transitions inline. Normally used by R2R, but also used when generating a reverse pinvoke frame, as
        // the current logic for frame setup initializes and pushes
        // the InlinedCallFrame before performing the Reverse PInvoke transition, which is invalid (as frames cannot
        // safely be pushed/popped while the thread is in a preemptive state).
        bool ShouldUsePInvokeHelpers()
        {
            return jitFlags->IsSet(JitFlags::JIT_FLAG_USE_PINVOKE_HELPERS) ||
                   jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE);
        }

        // true if we should insert the REVERSE_PINVOKE_{ENTER,EXIT} helpers in the method
        // prolog/epilog
        bool IsReversePInvoke()
        {
            return jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE);
        }

        bool compScopeInfo; // Generate the LocalVar info?
        bool compDbgCode;   // Generate debugger-friendly code?
        bool compDbgInfo;   // Gather debugging info?
        bool compDbgEnC;

#ifdef PROFILING_SUPPORTED
        bool compNoPInvokeInlineCB;
#else
        static const bool compNoPInvokeInlineCB;
#endif

#ifdef DEBUG
        bool compGcChecks; // Check arguments and return values to ensure they are sane
#endif

#if defined(DEBUG) && defined(TARGET_XARCH)

        bool compStackCheckOnRet; // Check stack pointer on return to ensure it is correct.

#endif // defined(DEBUG) && defined(TARGET_XARCH)

#if defined(DEBUG) && defined(TARGET_X86)

        bool compStackCheckOnCall; // Check stack pointer after call to ensure it is correct. Only for x86.

#endif // defined(DEBUG) && defined(TARGET_X86)

        bool compReloc; // Generate relocs for pointers in code, true for all ngen/prejit codegen

#ifdef DEBUG
#if defined(TARGET_XARCH)
        bool compEnablePCRelAddr; // Whether an absolute addr can be encoded as a PC-rel offset by RyuJIT where possible
#endif
#endif // DEBUG

#ifdef UNIX_AMD64_ABI
        // This flag indicates whether there is a need to align the frame.
        // On AMD64-Windows, if there are calls, 4 slots for the outgoing args are allocated, except for
        // FastTailCall. These slots make the frame size non-zero, so alignment logic will be called.
        // On AMD64-Unix, there are no such slots, so it is possible to have calls in the method with a frame size of
        // 0, in which case the frame alignment logic won't kick in. This flag takes care of the AMD64-Unix case by
        // remembering that there are calls and making sure the frame alignment logic is executed.
        bool compNeedToAlignFrame;
#endif // UNIX_AMD64_ABI

        bool compProcedureSplitting; // Separate cold code from hot code

        bool genFPorder; // Preserve FP order (operations are non-commutative)
        bool genFPopt;   // Can we do frame-pointer-omission optimization?
        bool altJit;     // True if we are an altjit and are compiling this method

#ifdef OPT_CONFIG
        bool optRepeat; // Repeat optimizer phases k times
#endif

#ifdef DEBUG
        bool compProcedureSplittingEH; // Separate cold code from hot code for functions with EH
        bool dspCode;                  // Display native code generated
        bool dspEHTable;               // Display the EH table reported to the VM
        bool dspDebugInfo;             // Display the Debug info reported to the VM
        bool dspInstrs;                // Display the IL instructions intermixed with the native code output
        bool dspLines;                 // Display source-code lines intermixed with native code output
        bool dmpHex;                   // Display raw bytes in hex of native code output
        bool varNames;                 // Display variable names in native code output
        bool disAsm;                   // Display native code as it is generated
        bool disAsmSpilled;            // Display native code when any register spilling occurs
        bool disasmWithGC;             // Display GC info interleaved with disassembly.
        bool disDiffable;              // Makes the Disassembly code 'diff-able'
        bool disAddr;                  // Display process address next to each instruction in disassembly code
        bool disAlignment;             // Display alignment boundaries in disassembly code
        bool disAsm2;                  // Display native code after it is generated using external disassembler
        bool dspOrder;                 // Display names of each of the methods that we ngen/jit
        bool dspUnwind;                // Display the unwind info output
        bool dspDiffable;              // Makes the Jit Dump 'diff-able' (currently uses same COMPlus_* flag as disDiffable)
        bool compLongAddress;          // Force using large pseudo instructions for long address
                                       // (IF_LARGEJMP/IF_LARGEADR/IF_LARGLDC)
        bool dspGCtbls;                // Display the GC tables
#endif

        bool compExpandCallsEarly; // True if we should expand virtual call targets early for this method

        // Default numbers used to perform loop alignment. All the numbers are chosen
        // based on experimenting with various benchmarks.
// Default minimum loop block weight required to enable loop alignment. #define DEFAULT_ALIGN_LOOP_MIN_BLOCK_WEIGHT 4 // By default a loop will be aligned at 32B address boundary to get better // performance as per architecture manuals. #define DEFAULT_ALIGN_LOOP_BOUNDARY 0x20 // For non-adaptive loop alignment, by default, only align a loop whose size is // at most 3 times the alignment block size. If the loop is bigger than that, it is most // likely complicated enough that loop alignment will not impact performance. #define DEFAULT_MAX_LOOPSIZE_FOR_ALIGN DEFAULT_ALIGN_LOOP_BOUNDARY * 3 #ifdef DEBUG // Loop alignment variables // If set, for non-adaptive alignment, ensure loop jmps are not on or cross alignment boundary. bool compJitAlignLoopForJcc; #endif // For non-adaptive alignment, minimum loop size (in bytes) for which alignment will be done. unsigned short compJitAlignLoopMaxCodeSize; // Minimum weight needed for the first block of a loop to make it a candidate for alignment. unsigned short compJitAlignLoopMinBlockWeight; // For non-adaptive alignment, address boundary (power of 2) at which loop alignment should // be done. By default, 32B. unsigned short compJitAlignLoopBoundary; // Padding limit to align a loop. unsigned short compJitAlignPaddingLimit; // If set, perform adaptive loop alignment that limits number of padding based on loop size. bool compJitAlignLoopAdaptive; // If set, tries to hide alignment instructions behind unconditional jumps. bool compJitHideAlignBehindJmp; // If set, tracks the hidden return buffer for struct arg. bool compJitOptimizeStructHiddenBuffer; #ifdef LATE_DISASM bool doLateDisasm; // Run the late disassembler #endif // LATE_DISASM #if DUMP_GC_TABLES && !defined(DEBUG) #pragma message("NOTE: this non-debug build has GC ptr table dumping always enabled!") static const bool dspGCtbls = true; #endif #ifdef PROFILING_SUPPORTED // Whether to emit Enter/Leave/TailCall hooks using a dummy stub (DummyProfilerELTStub()). // This option helps make the JIT behave as if it is running under a profiler. bool compJitELTHookEnabled; #endif // PROFILING_SUPPORTED #if FEATURE_TAILCALL_OPT // Whether opportunistic or implicit tail call optimization is enabled. bool compTailCallOpt; // Whether optimization of transforming a recursive tail call into a loop is enabled. bool compTailCallLoopOpt; #endif #if FEATURE_FASTTAILCALL // Whether fast tail calls are allowed. bool compFastTailCalls; #endif // FEATURE_FASTTAILCALL #if defined(TARGET_ARM64) // Decision about whether to save FP/LR registers with callee-saved registers (see // COMPlus_JitSaveFpLrWithCalleSavedRegisters). int compJitSaveFpLrWithCalleeSavedRegisters; #endif // defined(TARGET_ARM64) #ifdef CONFIGURABLE_ARM_ABI bool compUseSoftFP = false; #else #ifdef ARM_SOFTFP static const bool compUseSoftFP = true; #else // !ARM_SOFTFP static const bool compUseSoftFP = false; #endif // ARM_SOFTFP #endif // CONFIGURABLE_ARM_ABI } opts; static bool s_pAltJitExcludeAssembliesListInitialized; static AssemblyNamesList2* s_pAltJitExcludeAssembliesList; #ifdef DEBUG static bool s_pJitDisasmIncludeAssembliesListInitialized; static AssemblyNamesList2* s_pJitDisasmIncludeAssembliesList; static bool s_pJitFunctionFileInitialized; static MethodSet* s_pJitMethodSet; #endif // DEBUG #ifdef DEBUG // silence warning of cast to greater size. 
It is easier to silence than construct code the compiler is happy with, and // it is safe in this case #pragma warning(push) #pragma warning(disable : 4312) template <typename T> T dspPtr(T p) { return (p == ZERO) ? ZERO : (opts.dspDiffable ? T(0xD1FFAB1E) : p); } template <typename T> T dspOffset(T o) { return (o == ZERO) ? ZERO : (opts.dspDiffable ? T(0xD1FFAB1E) : o); } #pragma warning(pop) static int dspTreeID(GenTree* tree) { return tree->gtTreeID; } static void printStmtID(Statement* stmt) { assert(stmt != nullptr); printf(FMT_STMT, stmt->GetID()); } static void printTreeID(GenTree* tree) { if (tree == nullptr) { printf("[------]"); } else { printf("[%06d]", dspTreeID(tree)); } } const char* pgoSourceToString(ICorJitInfo::PgoSource p); const char* devirtualizationDetailToString(CORINFO_DEVIRTUALIZATION_DETAIL detail); #endif // DEBUG // clang-format off #define STRESS_MODES \ \ STRESS_MODE(NONE) \ \ /* "Variations" stress areas which we try to mix up with each other. */ \ /* These should not be exhaustively used as they might */ \ /* hide/trivialize other areas */ \ \ STRESS_MODE(REGS) \ STRESS_MODE(DBL_ALN) \ STRESS_MODE(LCL_FLDS) \ STRESS_MODE(UNROLL_LOOPS) \ STRESS_MODE(MAKE_CSE) \ STRESS_MODE(LEGACY_INLINE) \ STRESS_MODE(CLONE_EXPR) \ STRESS_MODE(USE_CMOV) \ STRESS_MODE(FOLD) \ STRESS_MODE(MERGED_RETURNS) \ STRESS_MODE(BB_PROFILE) \ STRESS_MODE(OPT_BOOLS_GC) \ STRESS_MODE(REMORPH_TREES) \ STRESS_MODE(64RSLT_MUL) \ STRESS_MODE(DO_WHILE_LOOPS) \ STRESS_MODE(MIN_OPTS) \ STRESS_MODE(REVERSE_FLAG) /* Will set GTF_REVERSE_OPS whenever we can */ \ STRESS_MODE(REVERSE_COMMA) /* Will reverse commas created with gtNewCommaNode */ \ STRESS_MODE(TAILCALL) /* Will make the call as a tailcall whenever legal */ \ STRESS_MODE(CATCH_ARG) /* Will spill catch arg */ \ STRESS_MODE(UNSAFE_BUFFER_CHECKS) \ STRESS_MODE(NULL_OBJECT_CHECK) \ STRESS_MODE(PINVOKE_RESTORE_ESP) \ STRESS_MODE(RANDOM_INLINE) \ STRESS_MODE(SWITCH_CMP_BR_EXPANSION) \ STRESS_MODE(GENERIC_VARN) \ STRESS_MODE(PROFILER_CALLBACKS) /* Will generate profiler hooks for ELT callbacks */ \ STRESS_MODE(BYREF_PROMOTION) /* Change undoPromotion decisions for byrefs */ \ STRESS_MODE(PROMOTE_FEWER_STRUCTS)/* Don't promote some structs that can be promoted */ \ STRESS_MODE(VN_BUDGET)/* Randomize the VN budget */ \ \ /* After COUNT_VARN, stress level 2 does all of these all the time */ \ \ STRESS_MODE(COUNT_VARN) \ \ /* "Check" stress areas that can be exhaustively used if we */ \ /* dont care about performance at all */ \ \ STRESS_MODE(FORCE_INLINE) /* Treat every method as AggressiveInlining */ \ STRESS_MODE(CHK_FLOW_UPDATE) \ STRESS_MODE(EMITTER) \ STRESS_MODE(CHK_REIMPORT) \ STRESS_MODE(FLATFP) \ STRESS_MODE(GENERIC_CHECK) \ STRESS_MODE(COUNT) enum compStressArea { #define STRESS_MODE(mode) STRESS_##mode, STRESS_MODES #undef STRESS_MODE }; // clang-format on #ifdef DEBUG static const LPCWSTR s_compStressModeNames[STRESS_COUNT + 1]; BYTE compActiveStressModes[STRESS_COUNT]; #endif // DEBUG #define MAX_STRESS_WEIGHT 100 bool compStressCompile(compStressArea stressArea, unsigned weightPercentage); bool compStressCompileHelper(compStressArea stressArea, unsigned weightPercentage); #ifdef DEBUG bool compInlineStress() { return compStressCompile(STRESS_LEGACY_INLINE, 50); } bool compRandomInlineStress() { return compStressCompile(STRESS_RANDOM_INLINE, 50); } bool compPromoteFewerStructs(unsigned lclNum); #endif // DEBUG bool compTailCallStress() { #ifdef DEBUG // Do not stress tailcalls in IL stubs as the runtime creates several IL // stubs to 
implement the tailcall mechanism, which would then // recursively create more IL stubs. return !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && (JitConfig.TailcallStress() != 0 || compStressCompile(STRESS_TAILCALL, 5)); #else return false; #endif } const char* compGetTieringName(bool wantShortName = false) const; const char* compGetStressMessage() const; codeOptimize compCodeOpt() const { #if 0 // Switching between size & speed has measurable throughput impact // (3.5% on NGen CoreLib when measured). It used to be enabled for // DEBUG, but should generate identical code between CHK & RET builds, // so that's not acceptable. // TODO-Throughput: Figure out what to do about size vs. speed & throughput. // Investigate the cause of the throughput regression. return opts.compCodeOpt; #else return BLENDED_CODE; #endif } //--------------------- Info about the procedure -------------------------- struct Info { COMP_HANDLE compCompHnd; CORINFO_MODULE_HANDLE compScopeHnd; CORINFO_CLASS_HANDLE compClassHnd; CORINFO_METHOD_HANDLE compMethodHnd; CORINFO_METHOD_INFO* compMethodInfo; bool hasCircularClassConstraints; bool hasCircularMethodConstraints; #if defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS const char* compMethodName; const char* compClassName; const char* compFullName; double compPerfScore; int compMethodSuperPMIIndex; // useful when debugging under SuperPMI #endif // defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS #if defined(DEBUG) || defined(INLINE_DATA) // Method hash is logically const, but computed // on first demand. mutable unsigned compMethodHashPrivate; unsigned compMethodHash() const; #endif // defined(DEBUG) || defined(INLINE_DATA) #ifdef PSEUDORANDOM_NOP_INSERTION // things for pseudorandom nop insertion unsigned compChecksum; CLRRandom compRNG; #endif // The following holds the FLG_xxxx flags for the method we're compiling. unsigned compFlags; // The following holds the class attributes for the method we're compiling. unsigned compClassAttr; const BYTE* compCode; IL_OFFSET compILCodeSize; // The IL code size IL_OFFSET compILImportSize; // Estimated amount of IL actually imported IL_OFFSET compILEntry; // The IL entry point (normally 0) PatchpointInfo* compPatchpointInfo; // Patchpoint data for OSR (normally nullptr) UNATIVE_OFFSET compNativeCodeSize; // The native code size, after instructions are issued. This // is less than (compTotalHotCodeSize + compTotalColdCodeSize) only if: // (1) the code is not hot/cold split, and we issued less code than we expected, or // (2) the code is hot/cold split, and we issued less code than we expected // in the cold section (the hot section will always be padded out to compTotalHotCodeSize). bool compIsStatic : 1; // Is the method static (no 'this' pointer)? bool compIsVarArgs : 1; // Does the method have varargs parameters? bool compInitMem : 1; // Is the CORINFO_OPT_INIT_LOCALS bit set in the method info options? bool compProfilerCallback : 1; // JIT inserted a profiler Enter callback bool compPublishStubParam : 1; // EAX captured in prolog will be available through an intrinsic bool compHasNextCallRetAddr : 1; // The NextCallReturnAddress intrinsic is used. var_types compRetType; // Return type of the method as declared in IL var_types compRetNativeType; // Normalized return type as per target arch ABI unsigned compILargsCount; // Number of arguments (incl. implicit but not hidden) unsigned compArgsCount; // Number of arguments (incl. 
implicit and hidden) #if FEATURE_FASTTAILCALL unsigned compArgStackSize; // Incoming argument stack size in bytes #endif // FEATURE_FASTTAILCALL unsigned compRetBuffArg; // position of hidden return param var (0, 1) (BAD_VAR_NUM means not present); int compTypeCtxtArg; // position of hidden param for type context for generic code (CORINFO_CALLCONV_PARAMTYPE) unsigned compThisArg; // position of implicit this pointer param (not to be confused with lvaArg0Var) unsigned compILlocalsCount; // Number of vars : args + locals (incl. implicit but not hidden) unsigned compLocalsCount; // Number of vars : args + locals (incl. implicit and hidden) unsigned compMaxStack; UNATIVE_OFFSET compTotalHotCodeSize; // Total number of bytes of Hot Code in the method UNATIVE_OFFSET compTotalColdCodeSize; // Total number of bytes of Cold Code in the method unsigned compUnmanagedCallCountWithGCTransition; // count of unmanaged calls with GC transition. CorInfoCallConvExtension compCallConv; // The entry-point calling convention for this method. unsigned compLvFrameListRoot; // lclNum for the Frame root unsigned compXcptnsCount; // Number of exception-handling clauses read in the method's IL. // You should generally use compHndBBtabCount instead: it is the // current number of EH clauses (after additions like synchronized // methods and funclets, and removals like unreachable code deletion). Target::ArgOrder compArgOrder; bool compMatchedVM; // true if the VM is "matched": either the JIT is a cross-compiler // and the VM expects that, or the JIT is a "self-host" compiler // (e.g., x86 hosted targeting x86) and the VM expects that. /* The following holds IL scope information about local variables. */ unsigned compVarScopesCount; VarScopeDsc* compVarScopes; /* The following holds information about instr offsets for * which we need to report IP-mappings */ IL_OFFSET* compStmtOffsets; // sorted unsigned compStmtOffsetsCount; ICorDebugInfo::BoundaryTypes compStmtOffsetsImplicit; #define CPU_X86 0x0100 // The generic X86 CPU #define CPU_X86_PENTIUM_4 0x0110 #define CPU_X64 0x0200 // The generic x64 CPU #define CPU_AMD_X64 0x0210 // AMD x64 CPU #define CPU_INTEL_X64 0x0240 // Intel x64 CPU #define CPU_ARM 0x0300 // The generic ARM CPU #define CPU_ARM64 0x0400 // The generic ARM64 CPU unsigned genCPU; // What CPU are we running on // Number of class profile probes in this method unsigned compClassProbeCount; } info; // Returns true if the method being compiled returns a non-void and non-struct value. // Note that lvaInitTypeRef() normalizes compRetNativeType for struct returns in a // single register as per target arch ABI (e.g on Amd64 Windows structs of size 1, 2, // 4 or 8 gets normalized to TYP_BYTE/TYP_SHORT/TYP_INT/TYP_LONG; On Arm HFA structs). // Methods returning such structs are considered to return non-struct return value and // this method returns true in that case. bool compMethodReturnsNativeScalarType() { return (info.compRetType != TYP_VOID) && !varTypeIsStruct(info.compRetNativeType); } // Returns true if the method being compiled returns RetBuf addr as its return value bool compMethodReturnsRetBufAddr() { // There are cases where implicit RetBuf argument should be explicitly returned in a register. // In such cases the return type is changed to TYP_BYREF and appropriate IR is generated. // These cases are: CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_AMD64 // 1. on x64 Windows and Unix the address of RetBuf needs to be returned by // methods with hidden RetBufArg in RAX. 
In such case GT_RETURN is of TYP_BYREF, // returning the address of RetBuf. return (info.compRetBuffArg != BAD_VAR_NUM); #else // TARGET_AMD64 #ifdef PROFILING_SUPPORTED // 2. Profiler Leave callback expects the address of retbuf as return value for // methods with hidden RetBuf argument. impReturnInstruction() when profiler // callbacks are needed creates GT_RETURN(TYP_BYREF, op1 = Addr of RetBuf) for // methods with hidden RetBufArg. if (compIsProfilerHookNeeded()) { return (info.compRetBuffArg != BAD_VAR_NUM); } #endif // 3. Windows ARM64 native instance calling convention requires the address of RetBuff // to be returned in x0. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_ARM64) if (TargetOS::IsWindows) { auto callConv = info.compCallConv; if (callConvIsInstanceMethodCallConv(callConv)) { return (info.compRetBuffArg != BAD_VAR_NUM); } } #endif // TARGET_ARM64 // 4. x86 unmanaged calling conventions require the address of RetBuff to be returned in eax. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_X86) if (info.compCallConv != CorInfoCallConvExtension::Managed) { return (info.compRetBuffArg != BAD_VAR_NUM); } #endif return false; #endif // TARGET_AMD64 } // Returns true if the method returns a value in more than one return register // TODO-ARM-Bug: Deal with multi-register genReturnLocaled structs? // TODO-ARM64: Does this apply for ARM64 too? bool compMethodReturnsMultiRegRetType() { #if FEATURE_MULTIREG_RET #if defined(TARGET_X86) // On x86, 64-bit longs and structs are returned in multiple registers return varTypeIsLong(info.compRetNativeType) || (varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM)); #else // targets: X64-UNIX, ARM64 or ARM32 // On all other targets that support multireg return values: // Methods returning a struct in multiple registers have a return value of TYP_STRUCT. // Such method's compRetNativeType is TYP_STRUCT without a hidden RetBufArg return varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM); #endif // TARGET_XXX #else // not FEATURE_MULTIREG_RET // For this architecture there are no multireg returns return false; #endif // FEATURE_MULTIREG_RET } bool compEnregLocals() { return ((opts.compFlags & CLFLG_REGVAR) != 0); } bool compEnregStructLocals() { return (JitConfig.JitEnregStructLocals() != 0); } bool compObjectStackAllocation() { return (JitConfig.JitObjectStackAllocation() != 0); } // Returns true if the method returns a value in more than one return register, // it should replace/be merged with compMethodReturnsMultiRegRetType when #36868 is fixed. // The difference from original `compMethodReturnsMultiRegRetType` is in ARM64 SIMD* handling, // this method correctly returns false for it (it is passed as HVA), when the original returns true. bool compMethodReturnsMultiRegRegTypeAlternate() { #if FEATURE_MULTIREG_RET #if defined(TARGET_X86) // On x86, 64-bit longs and structs are returned in multiple registers return varTypeIsLong(info.compRetNativeType) || (varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM)); #else // targets: X64-UNIX, ARM64 or ARM32 #if defined(TARGET_ARM64) // TYP_SIMD* are returned in one register. if (varTypeIsSIMD(info.compRetNativeType)) { return false; } #endif // On all other targets that support multireg return values: // Methods returning a struct in multiple registers have a return value of TYP_STRUCT. 
// Such method's compRetNativeType is TYP_STRUCT without a hidden RetBufArg return varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM); #endif // TARGET_XXX #else // not FEATURE_MULTIREG_RET // For this architecture there are no multireg returns return false; #endif // FEATURE_MULTIREG_RET } // Returns true if the method being compiled returns a value bool compMethodHasRetVal() { return compMethodReturnsNativeScalarType() || compMethodReturnsRetBufAddr() || compMethodReturnsMultiRegRetType(); } // Returns true if the method requires a PInvoke prolog and epilog bool compMethodRequiresPInvokeFrame() { return (info.compUnmanagedCallCountWithGCTransition > 0); } // Returns true if address-exposed user variables should be poisoned with a recognizable value bool compShouldPoisonFrame() { #ifdef FEATURE_ON_STACK_REPLACEMENT if (opts.IsOSR()) return false; #endif return !info.compInitMem && opts.compDbgCode; } // Returns true if the jit supports having patchpoints in this method. // Optionally, get the reason why not. bool compCanHavePatchpoints(const char** reason = nullptr); #if defined(DEBUG) void compDispLocalVars(); #endif // DEBUG private: class ClassLayoutTable* m_classLayoutTable; class ClassLayoutTable* typCreateClassLayoutTable(); class ClassLayoutTable* typGetClassLayoutTable(); public: // Get the layout having the specified layout number. ClassLayout* typGetLayoutByNum(unsigned layoutNum); // Get the layout number of the specified layout. unsigned typGetLayoutNum(ClassLayout* layout); // Get the layout having the specified size but no class handle. ClassLayout* typGetBlkLayout(unsigned blockSize); // Get the number of a layout having the specified size but no class handle. unsigned typGetBlkLayoutNum(unsigned blockSize); // Get the layout for the specified class handle. ClassLayout* typGetObjLayout(CORINFO_CLASS_HANDLE classHandle); // Get the number of a layout for the specified class handle. unsigned typGetObjLayoutNum(CORINFO_CLASS_HANDLE classHandle); //-------------------------- Global Compiler Data ------------------------------------ #ifdef DEBUG private: static LONG s_compMethodsCount; // to produce unique label names #endif public: #ifdef DEBUG LONG compMethodID; unsigned compGenTreeID; unsigned compStatementID; unsigned compBasicBlockID; #endif BasicBlock* compCurBB; // the current basic block in process Statement* compCurStmt; // the current statement in process GenTree* compCurTree; // the current tree in process // The following is used to create the 'method JIT info' block. 
size_t compInfoBlkSize; BYTE* compInfoBlkAddr; EHblkDsc* compHndBBtab; // array of EH data unsigned compHndBBtabCount; // element count of used elements in EH data array unsigned compHndBBtabAllocCount; // element count of allocated elements in EH data array #if defined(TARGET_X86) //------------------------------------------------------------------------- // Tracking of region covered by the monitor in synchronized methods void* syncStartEmitCookie; // the emitter cookie for first instruction after the call to MON_ENTER void* syncEndEmitCookie; // the emitter cookie for first instruction after the call to MON_EXIT #endif // !TARGET_X86 Phases mostRecentlyActivePhase; // the most recently active phase PhaseChecks activePhaseChecks; // the currently active phase checks //------------------------------------------------------------------------- // The following keeps track of how many bytes of local frame space we've // grabbed so far in the current function, and how many argument bytes we // need to pop when we return. // unsigned compLclFrameSize; // secObject+lclBlk+locals+temps // Count of callee-saved regs we pushed in the prolog. // Does not include EBP for isFramePointerUsed() and double-aligned frames. // In case of Amd64 this doesn't include float regs saved on stack. unsigned compCalleeRegsPushed; #if defined(TARGET_XARCH) // Mask of callee saved float regs on stack. regMaskTP compCalleeFPRegsSavedMask; #endif #ifdef TARGET_AMD64 // Quirk for VS debug-launch scenario to work: // Bytes of padding between save-reg area and locals. #define VSQUIRK_STACK_PAD (2 * REGSIZE_BYTES) unsigned compVSQuirkStackPaddingNeeded; #endif unsigned compArgSize; // total size of arguments in bytes (including register args (lvIsRegArg)) #ifdef TARGET_ARM bool compHasSplitParam; #endif unsigned compMapILargNum(unsigned ILargNum); // map accounting for hidden args unsigned compMapILvarNum(unsigned ILvarNum); // map accounting for hidden args unsigned compMap2ILvarNum(unsigned varNum) const; // map accounting for hidden args #if defined(TARGET_ARM64) struct FrameInfo { // Frame type (1-5) int frameType; // Distance from established (method body) SP to base of callee save area int calleeSaveSpOffset; // Amount to subtract from SP before saving (prolog) OR // to add to SP after restoring (epilog) callee saves int calleeSaveSpDelta; // Distance from established SP to where caller's FP was saved int offsetSpToSavedFp; } compFrameInfo; #endif //------------------------------------------------------------------------- static void compStartup(); // One-time initialization static void compShutdown(); // One-time finalization void compInit(ArenaAllocator* pAlloc, CORINFO_METHOD_HANDLE methodHnd, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, InlineInfo* inlineInfo); void compDone(); static void compDisplayStaticSizes(FILE* fout); //------------ Some utility functions -------------- void* compGetHelperFtn(CorInfoHelpFunc ftnNum, /* IN */ void** ppIndirection); /* OUT */ // Several JIT/EE interface functions return a CorInfoType, and also return a // class handle as an out parameter if the type is a value class. Returns the // size of the type these describe. unsigned compGetTypeSize(CorInfoType cit, CORINFO_CLASS_HANDLE clsHnd); #ifdef DEBUG // Components used by the compiler may write unit test suites, and // have them run within this method. They will be run only once per process, and only // in debug. (Perhaps should be under the control of a COMPlus_ flag.) // These should fail by asserting. 
void compDoComponentUnitTestsOnce(); #endif // DEBUG int compCompile(CORINFO_MODULE_HANDLE classPtr, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags); void compCompileFinish(); int compCompileHelper(CORINFO_MODULE_HANDLE classPtr, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlag); ArenaAllocator* compGetArenaAllocator(); void generatePatchpointInfo(); #if MEASURE_MEM_ALLOC static bool s_dspMemStats; // Display per-phase memory statistics for every function #endif // MEASURE_MEM_ALLOC #if LOOP_HOIST_STATS unsigned m_loopsConsidered; bool m_curLoopHasHoistedExpression; unsigned m_loopsWithHoistedExpressions; unsigned m_totalHoistedExpressions; void AddLoopHoistStats(); void PrintPerMethodLoopHoistStats(); static CritSecObject s_loopHoistStatsLock; // This lock protects the data structures below. static unsigned s_loopsConsidered; static unsigned s_loopsWithHoistedExpressions; static unsigned s_totalHoistedExpressions; static void PrintAggregateLoopHoistStats(FILE* f); #endif // LOOP_HOIST_STATS #if TRACK_ENREG_STATS class EnregisterStats { private: unsigned m_totalNumberOfVars; unsigned m_totalNumberOfStructVars; unsigned m_totalNumberOfEnregVars; unsigned m_totalNumberOfStructEnregVars; unsigned m_addrExposed; unsigned m_hiddenStructArg; unsigned m_VMNeedsStackAddr; unsigned m_localField; unsigned m_blockOp; unsigned m_dontEnregStructs; unsigned m_notRegSizeStruct; unsigned m_structArg; unsigned m_lclAddrNode; unsigned m_castTakesAddr; unsigned m_storeBlkSrc; unsigned m_oneAsgRetyping; unsigned m_swizzleArg; unsigned m_blockOpRet; unsigned m_returnSpCheck; unsigned m_simdUserForcesDep; unsigned m_liveInOutHndlr; unsigned m_depField; unsigned m_noRegVars; unsigned m_minOptsGC; #ifdef JIT32_GCENCODER unsigned m_PinningRef; #endif // JIT32_GCENCODER #if !defined(TARGET_64BIT) unsigned m_longParamField; #endif // !TARGET_64BIT unsigned m_parentExposed; unsigned m_tooConservative; unsigned m_escapeAddress; unsigned m_osrExposed; unsigned m_stressLclFld; unsigned m_copyFldByFld; unsigned m_dispatchRetBuf; unsigned m_wideIndir; public: void RecordLocal(const LclVarDsc* varDsc); void Dump(FILE* fout) const; }; static EnregisterStats s_enregisterStats; #endif // TRACK_ENREG_STATS bool compIsForImportOnly(); bool compIsForInlining() const; bool compDonotInline(); #ifdef DEBUG // Get the default fill char value we randomize this value when JitStress is enabled. 
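    // (Roughly: the base value comes from the JitDefaultFill config knob and, under JitStress, is
    // varied per method so code cannot accidentally depend on a particular fill pattern - an
    // approximate description; see the implementation for details.)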
static unsigned char compGetJitDefaultFill(Compiler* comp); const char* compLocalVarName(unsigned varNum, unsigned offs); VarName compVarName(regNumber reg, bool isFloatReg = false); const char* compRegVarName(regNumber reg, bool displayVar = false, bool isFloatReg = false); const char* compRegNameForSize(regNumber reg, size_t size); const char* compFPregVarName(unsigned fpReg, bool displayVar = false); void compDspSrcLinesByNativeIP(UNATIVE_OFFSET curIP); void compDspSrcLinesByLineNum(unsigned line, bool seek = false); #endif // DEBUG //------------------------------------------------------------------------- struct VarScopeListNode { VarScopeDsc* data; VarScopeListNode* next; static VarScopeListNode* Create(VarScopeDsc* value, CompAllocator alloc) { VarScopeListNode* node = new (alloc) VarScopeListNode; node->data = value; node->next = nullptr; return node; } }; struct VarScopeMapInfo { VarScopeListNode* head; VarScopeListNode* tail; static VarScopeMapInfo* Create(VarScopeListNode* node, CompAllocator alloc) { VarScopeMapInfo* info = new (alloc) VarScopeMapInfo; info->head = node; info->tail = node; return info; } }; // Max value of scope count for which we would use linear search; for larger values we would use hashtable lookup. static const unsigned MAX_LINEAR_FIND_LCL_SCOPELIST = 32; typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, VarScopeMapInfo*> VarNumToScopeDscMap; // Map to keep variables' scope indexed by varNum containing it's scope dscs at the index. VarNumToScopeDscMap* compVarScopeMap; VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned lifeBeg, unsigned lifeEnd); VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned offs); VarScopeDsc* compFindLocalVarLinear(unsigned varNum, unsigned offs); void compInitVarScopeMap(); VarScopeDsc** compEnterScopeList; // List has the offsets where variables // enter scope, sorted by instr offset unsigned compNextEnterScope; VarScopeDsc** compExitScopeList; // List has the offsets where variables // go out of scope, sorted by instr offset unsigned compNextExitScope; void compInitScopeLists(); void compResetScopeLists(); VarScopeDsc* compGetNextEnterScope(unsigned offs, bool scan = false); VarScopeDsc* compGetNextExitScope(unsigned offs, bool scan = false); void compProcessScopesUntil(unsigned offset, VARSET_TP* inScope, void (Compiler::*enterScopeFn)(VARSET_TP* inScope, VarScopeDsc*), void (Compiler::*exitScopeFn)(VARSET_TP* inScope, VarScopeDsc*)); #ifdef DEBUG void compDispScopeLists(); #endif // DEBUG bool compIsProfilerHookNeeded(); //------------------------------------------------------------------------- /* Statistical Data Gathering */ void compJitStats(); // call this function and enable // various ifdef's below for statistical data #if CALL_ARG_STATS void compCallArgStats(); static void compDispCallArgStats(FILE* fout); #endif //------------------------------------------------------------------------- protected: #ifdef DEBUG bool skipMethod(); #endif ArenaAllocator* compArenaAllocator; public: void compFunctionTraceStart(); void compFunctionTraceEnd(void* methodCodePtr, ULONG methodCodeSize, bool isNYI); protected: size_t compMaxUncheckedOffsetForNullObject; void compInitOptions(JitFlags* compileFlags); void compSetProcessor(); void compInitDebuggingInfo(); void compSetOptimizationLevel(); #ifdef TARGET_ARMARCH bool compRsvdRegCheck(FrameLayoutState curState); #endif void compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags); // Clear annotations produced during optimizations; 
// to be used between iterations when repeating opts.
    void ResetOptAnnotations();

    // Regenerate loop descriptors; to be used between iterations when repeating opts.
    void RecomputeLoopInfo();

#ifdef PROFILING_SUPPORTED
    // Data required for generating profiler Enter/Leave/TailCall hooks

    bool  compProfilerHookNeeded; // Whether profiler Enter/Leave/TailCall hook needs to be generated for the method
    void* compProfilerMethHnd;    // Profiler handle of the method being compiled. Passed as param to ELT callbacks
    bool  compProfilerMethHndIndirected; // Whether compProfilerHandle is pointer to the handle or is an actual handle
#endif

public:
    // Assumes called as part of process shutdown; does any compiler-specific work associated with that.
    static void ProcessShutdownWork(ICorStaticInfo* statInfo);

    CompAllocator getAllocator(CompMemKind cmk = CMK_Generic)
    {
        return CompAllocator(compArenaAllocator, cmk);
    }

    CompAllocator getAllocatorGC()
    {
        return getAllocator(CMK_GC);
    }

    CompAllocator getAllocatorLoopHoist()
    {
        return getAllocator(CMK_LoopHoist);
    }

#ifdef DEBUG
    CompAllocator getAllocatorDebugOnly()
    {
        return getAllocator(CMK_DebugOnly);
    }
#endif // DEBUG

    /*
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XX                                                                           XX
    XX                           typeInfo                                        XX
    XX                                                                           XX
    XX   Checks for type compatibility and merges types                          XX
    XX                                                                           XX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    */

public:
    // Returns true if child is equal to or a subtype of parent for merge purposes
    // This support is necessary to support attributes that are not described in
    // for example, signatures. For example, the permanent home byref (byref that
    // points to the gc heap), isn't a property of method signatures, therefore,
    // it is safe to have mismatches here (that tiCompatibleWith will not flag),
    // but when deciding if we need to reimport a block, we need to take these
    // into account
    bool tiMergeCompatibleWith(const typeInfo& pChild, const typeInfo& pParent, bool normalisedForStack) const;

    // Returns true if child is equal to or a subtype of parent.
    // normalisedForStack indicates that both types are normalised for the stack
    bool tiCompatibleWith(const typeInfo& pChild, const typeInfo& pParent, bool normalisedForStack) const;

    // Merges pDest and pSrc. Returns false if merge is undefined.
    // *pDest is modified to represent the merged type. Sets "*changed" to true
    // if this changes "*pDest".
    bool tiMergeToCommonParent(typeInfo* pDest, const typeInfo* pSrc, bool* changed) const;

    /*
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XX                                                                           XX
    XX                           IL verification stuff                           XX
    XX                                                                           XX
    XX                                                                           XX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    */

public:
    // The following is used to track liveness of local variables, initialization
    // of valueclass constructors, and type safe use of IL instructions.

    // dynamic state info needed for verification
    EntryState verCurrentState;

    // The "this" ptr of object type .ctors is considered initialized only after
    // the base class ctor is called, or an alternate ctor is called.
    // An uninitialized "this" ptr can be used to access fields, but cannot
    // be used to call a member function.
    bool verTrackObjCtorInitState;

    void verInitBBEntryState(BasicBlock* block, EntryState* currentState);

    // Requires that "tis" is not TIS_Bottom -- it's a definite init/uninit state.
    void verSetThisInit(BasicBlock* block, ThisInitState tis);
    void verInitCurrentState();
    void verResetCurrentState(BasicBlock* block, EntryState* currentState);

    // Merges the current verification state into the entry state of "block", returning false if that merge fails,
    // true if it succeeds. Further sets "*changed" to true if this changes the entry state of "block".
    bool verMergeEntryStates(BasicBlock* block, bool* changed);

    void verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg));
    void verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg));
    typeInfo verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd,
                             bool bashStructToRef = false); // converts from jit type representation to typeInfo
    typeInfo verMakeTypeInfo(CorInfoType ciType,
                             CORINFO_CLASS_HANDLE clsHnd); // converts from jit type representation to typeInfo
    bool verIsSDArray(const typeInfo& ti);
    typeInfo verGetArrayElemType(const typeInfo& ti);

    typeInfo verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args);
    bool verIsByRefLike(const typeInfo& ti);
    bool verIsSafeToReturnByRef(const typeInfo& ti);

    // generic type variables range over types that satisfy IsBoxable
    bool verIsBoxable(const typeInfo& ti);

    void DECLSPEC_NORETURN verRaiseVerifyException(INDEBUG(const char* reason) DEBUGARG(const char* file)
                                                       DEBUGARG(unsigned line));
    void verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* reason) DEBUGARG(const char* file)
                                             DEBUGARG(unsigned line));

    bool verCheckTailCallConstraint(OPCODE                  opcode,
                                    CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                    CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call
                                                                                       // on a type parameter?
                                    bool speculative // If true, won't throw if verification fails. Instead it will
                                                     // return false to the caller.
                                                     // If false, it will throw.
                                    );

    bool verIsBoxedValueType(const typeInfo& ti);

    void verVerifyCall(OPCODE                  opcode,
                       CORINFO_RESOLVED_TOKEN* pResolvedToken,
                       CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
                       bool                    tailCall,
                       bool                    readonlyCall, // is this a "readonly." call?
                       const BYTE*             delegateCreateStart,
                       const BYTE*             codeAddr,
                       CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName));

    bool verCheckDelegateCreation(const BYTE* delegateCreateStart, const BYTE* codeAddr, mdMemberRef& targetMemberRef);

    typeInfo verVerifySTIND(const typeInfo& ptr, const typeInfo& value, const typeInfo& instrType);
    typeInfo verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType);
    void verVerifyField(CORINFO_RESOLVED_TOKEN*   pResolvedToken,
                        const CORINFO_FIELD_INFO& fieldInfo,
                        const typeInfo*           tiThis,
                        bool                      mutator,
                        bool                      allowPlainStructAsThis = false);
    void verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode);
    void verVerifyThisPtrInitialised();
    bool verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target);

#ifdef DEBUG

    // One line log function. Default level is 0. Increasing it gives you
    // more log information

    // levels are currently unused: #define JITDUMP(level,...)                     ();
    void JitLogEE(unsigned level, const char* fmt, ...);

    bool compDebugBreak;

    bool compJitHaltMethod();

#endif

    /*
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XX                                                                           XX
    XX                   GS Security checks for unsafe buffers                   XX
    XX                                                                           XX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    */
public:
    struct ShadowParamVarInfo
    {
        FixedBitVect* assignGroup; // the closure set of variables whose values depend on each other
        unsigned      shadowCopy;  // Lcl var num, if not valid set to BAD_VAR_NUM

        static bool mayNeedShadowCopy(LclVarDsc* varDsc)
        {
#if defined(TARGET_AMD64)
            // GS cookie logic to create shadow slots, create trees to copy reg args to shadow
            // slots and update all trees to refer to shadow slots is done immediately after
            // fgMorph(). Lsra could potentially mark a param as DoNotEnregister after JIT determines
            // not to shadow a parameter. Also, LSRA could potentially spill a param which is passed
            // in register. Therefore, conservatively all params may need a shadow copy. Note that
            // GS cookie logic further checks whether the param is a ptr or an unsafe buffer before
            // creating a shadow slot even though this routine returns true.
            //
            // TODO-AMD64-CQ: Revisit this conservative approach as it could create more shadow slots than
            // required. There are two cases under which a reg arg could potentially be used from its
            // home location:
            //   a) LSRA marks it as DoNotEnregister (see LinearScan::identifyCandidates())
            //   b) LSRA spills it
            //
            // Possible solution to address case (a)
            //   - The conditions under which LSRA marks a varDsc as DoNotEnregister could be checked
            //     in this routine. Note that live out of exception handler is something we may not be
            //     able to do here since GS cookie logic is invoked ahead of liveness computation.
            //     Therefore, for methods with exception handling that need the GS cookie check we might
            //     have to take a conservative approach.
            //
            // Possible solution to address case (b)
            //   - Whenever a parameter passed in an argument register needs to be spilled by LSRA, we
            //     create a new spill temp if the method needs GS cookie check.

            return varDsc->lvIsParam;
#else // !defined(TARGET_AMD64)
            return varDsc->lvIsParam && !varDsc->lvIsRegArg;
#endif
        }

#ifdef DEBUG
        void Print()
        {
            printf("assignGroup [%p]; shadowCopy: [%d];\n", assignGroup, shadowCopy);
        }
#endif
    };

    GSCookie*           gsGlobalSecurityCookieAddr; // Address of global cookie for unsafe buffer checks
    GSCookie            gsGlobalSecurityCookieVal;  // Value of global cookie if addr is NULL
    ShadowParamVarInfo* gsShadowVarInfo;            // Table used by shadow param analysis code

    void gsGSChecksInitCookie();   // Grabs cookie variable
    void gsCopyShadowParams();     // Identify vulnerable params and create shadow copies
    bool gsFindVulnerableParams(); // Shadow param analysis code
    void gsParamsToShadows();      // Insert copy code and replace param uses with shadow

    static fgWalkPreFn gsMarkPtrsAndAssignGroups; // Shadow param analysis tree-walk
    static fgWalkPreFn gsReplaceShadowParams;     // Shadow param replacement tree-walk

#define DEFAULT_MAX_INLINE_SIZE 100 // Methods with > DEFAULT_MAX_INLINE_SIZE IL bytes will never be inlined.
                                    // This can be overwritten by setting complus_JITInlineSize env variable.
#define DEFAULT_MAX_INLINE_DEPTH 20 // Methods at more than this level deep will not be inlined #define DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE 32 // fixed locallocs of this size or smaller will convert to local buffers private: #ifdef FEATURE_JIT_METHOD_PERF JitTimer* pCompJitTimer; // Timer data structure (by phases) for current compilation. static CompTimeSummaryInfo s_compJitTimerSummary; // Summary of the Timer information for the whole run. static LPCWSTR JitTimeLogCsv(); // Retrieve the file name for CSV from ConfigDWORD. static LPCWSTR compJitTimeLogFilename; // If a log file for JIT time is desired, filename to write it to. #endif void BeginPhase(Phases phase); // Indicate the start of the given phase. void EndPhase(Phases phase); // Indicate the end of the given phase. #if MEASURE_CLRAPI_CALLS // Thin wrappers that call into JitTimer (if present). inline void CLRApiCallEnter(unsigned apix); inline void CLRApiCallLeave(unsigned apix); public: inline void CLR_API_Enter(API_ICorJitInfo_Names ename); inline void CLR_API_Leave(API_ICorJitInfo_Names ename); private: #endif #if defined(DEBUG) || defined(INLINE_DATA) // These variables are associated with maintaining SQM data about compile time. unsigned __int64 m_compCyclesAtEndOfInlining; // The thread-virtualized cycle count at the end of the inlining phase // in the current compilation. unsigned __int64 m_compCycles; // Net cycle count for current compilation DWORD m_compTickCountAtEndOfInlining; // The result of GetTickCount() (# ms since some epoch marker) at the end of // the inlining phase in the current compilation. #endif // defined(DEBUG) || defined(INLINE_DATA) // Records the SQM-relevant (cycles and tick count). Should be called after inlining is complete. // (We do this after inlining because this marks the last point at which the JIT is likely to cause // type-loading and class initialization). void RecordStateAtEndOfInlining(); // Assumes being called at the end of compilation. Update the SQM state. void RecordStateAtEndOfCompilation(); public: #if FUNC_INFO_LOGGING static LPCWSTR compJitFuncInfoFilename; // If a log file for per-function information is required, this is the // filename to write it to. static FILE* compJitFuncInfoFile; // And this is the actual FILE* to write to. #endif // FUNC_INFO_LOGGING Compiler* prevCompiler; // Previous compiler on stack for TLS Compiler* linked list for reentrant compilers. #if MEASURE_NOWAY void RecordNowayAssert(const char* filename, unsigned line, const char* condStr); #endif // MEASURE_NOWAY #ifndef FEATURE_TRACELOGGING // Should we actually fire the noway assert body and the exception handler? bool compShouldThrowOnNoway(); #else // FEATURE_TRACELOGGING // Should we actually fire the noway assert body and the exception handler? bool compShouldThrowOnNoway(const char* filename, unsigned line); // Telemetry instance to use per method compilation. JitTelemetry compJitTelemetry; // Get common parameters that have to be logged with most telemetry data. void compGetTelemetryDefaults(const char** assemblyName, const char** scopeName, const char** methodName, unsigned* methodHash); #endif // !FEATURE_TRACELOGGING #ifdef DEBUG private: NodeToTestDataMap* m_nodeTestData; static const unsigned FIRST_LOOP_HOIST_CSE_CLASS = 1000; unsigned m_loopHoistCSEClass; // LoopHoist test annotations turn into CSE requirements; we // label them with CSE Class #'s starting at FIRST_LOOP_HOIST_CSE_CLASS. // Current kept in this. 
public:
    NodeToTestDataMap* GetNodeTestData()
    {
        Compiler* compRoot = impInlineRoot();
        if (compRoot->m_nodeTestData == nullptr)
        {
            compRoot->m_nodeTestData = new (getAllocatorDebugOnly()) NodeToTestDataMap(getAllocatorDebugOnly());
        }
        return compRoot->m_nodeTestData;
    }

    typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, int> NodeToIntMap;

    // Returns the set (i.e., the domain of the result map) of nodes that are keys in m_nodeTestData, and
    // currently occur in the AST graph.
    NodeToIntMap* FindReachableNodesInNodeTestData();

    // Node "from" is being eliminated, and being replaced by node "to". If "from" had any associated
    // test data, associate that data with "to".
    void TransferTestDataToNode(GenTree* from, GenTree* to);

    // These are the methods that test that the various conditions implied by the
    // test attributes are satisfied.
    void JitTestCheckSSA(); // SSA builder tests.
    void JitTestCheckVN();  // Value numbering tests.

#endif // DEBUG

    // The "FieldSeqStore", for canonicalizing field sequences. See the definition of FieldSeqStore for
    // operations.
    FieldSeqStore* m_fieldSeqStore;

    FieldSeqStore* GetFieldSeqStore()
    {
        Compiler* compRoot = impInlineRoot();
        if (compRoot->m_fieldSeqStore == nullptr)
        {
            // Create a CompAllocator that labels sub-structure with CMK_FieldSeqStore, and use that for allocation.
            CompAllocator ialloc(getAllocator(CMK_FieldSeqStore));
            compRoot->m_fieldSeqStore = new (ialloc) FieldSeqStore(ialloc);
        }
        return compRoot->m_fieldSeqStore;
    }

    typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, FieldSeqNode*> NodeToFieldSeqMap;

    // Some nodes of "TYP_BYREF" or "TYP_I_IMPL" actually represent the address of a field within a struct, but since
    // the offset of the field is zero, there's no "GT_ADD" node. We normally attach a field sequence to the constant
    // that is added, but what do we do when that constant is zero, and is thus not present? We use this mechanism to
    // attach the field sequence directly to the address node.
    NodeToFieldSeqMap* m_zeroOffsetFieldMap;

    NodeToFieldSeqMap* GetZeroOffsetFieldMap()
    {
        // Don't need to worry about inlining here
        if (m_zeroOffsetFieldMap == nullptr)
        {
            // Create a CompAllocator that labels sub-structure with CMK_ZeroOffsetFieldMap, and use that for
            // allocation.
            CompAllocator ialloc(getAllocator(CMK_ZeroOffsetFieldMap));
            m_zeroOffsetFieldMap = new (ialloc) NodeToFieldSeqMap(ialloc);
        }
        return m_zeroOffsetFieldMap;
    }

    // Requires that "op1" is a node of type "TYP_BYREF" or "TYP_I_IMPL". We are dereferencing this with the fields in
    // "fieldSeq", whose offsets are required all to be zero. Ensures that any field sequence annotation currently on
    // "op1" or its components is augmented by appending "fieldSeq". In practice, if "op1" is a GT_LCL_FLD, it has
    // a field sequence as a member; otherwise, it may be the addition of a byref and a constant, where the const
    // has a field sequence -- in this case "fieldSeq" is appended to that of the constant; otherwise, we
    // record the field sequence using the ZeroOffsetFieldMap described above.
    //
    // One exception to the above is when "op1" is a node of type "TYP_REF" where "op1" is a GT_LCL_VAR.
    // This happens when the System.Object vtable pointer is a regular field at offset 0 in System.Private.CoreLib in
    // CoreRT. Such a case is handled the same as the default case.
void fgAddFieldSeqForZeroOffset(GenTree* op1, FieldSeqNode* fieldSeq); typedef JitHashTable<const GenTree*, JitPtrKeyFuncs<GenTree>, ArrayInfo> NodeToArrayInfoMap; NodeToArrayInfoMap* m_arrayInfoMap; NodeToArrayInfoMap* GetArrayInfoMap() { Compiler* compRoot = impInlineRoot(); if (compRoot->m_arrayInfoMap == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_ArrayInfoMap, and use that for allocation. CompAllocator ialloc(getAllocator(CMK_ArrayInfoMap)); compRoot->m_arrayInfoMap = new (ialloc) NodeToArrayInfoMap(ialloc); } return compRoot->m_arrayInfoMap; } //----------------------------------------------------------------------------------------------------------------- // Compiler::TryGetArrayInfo: // Given an indirection node, checks to see whether or not that indirection represents an array access, and // if so returns information about the array. // // Arguments: // indir - The `GT_IND` node. // arrayInfo (out) - Information about the accessed array if this function returns true. Undefined otherwise. // // Returns: // True if the `GT_IND` node represents an array access; false otherwise. bool TryGetArrayInfo(GenTreeIndir* indir, ArrayInfo* arrayInfo) { if ((indir->gtFlags & GTF_IND_ARR_INDEX) == 0) { return false; } if (indir->gtOp1->OperIs(GT_INDEX_ADDR)) { GenTreeIndexAddr* const indexAddr = indir->gtOp1->AsIndexAddr(); *arrayInfo = ArrayInfo(indexAddr->gtElemType, indexAddr->gtElemSize, indexAddr->gtElemOffset, indexAddr->gtStructElemClass); return true; } bool found = GetArrayInfoMap()->Lookup(indir, arrayInfo); assert(found); return true; } NodeToUnsignedMap* m_memorySsaMap[MemoryKindCount]; // In some cases, we want to assign intermediate SSA #'s to memory states, and know what nodes create those memory // states. (We do this for try blocks, where, if the try block doesn't do a call that loses track of the memory // state, all the possible memory states are possible initial states of the corresponding catch block(s).) NodeToUnsignedMap* GetMemorySsaMap(MemoryKind memoryKind) { if (memoryKind == GcHeap && byrefStatesMatchGcHeapStates) { // Use the same map for GCHeap and ByrefExposed when their states match. memoryKind = ByrefExposed; } assert(memoryKind < MemoryKindCount); Compiler* compRoot = impInlineRoot(); if (compRoot->m_memorySsaMap[memoryKind] == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_ArrayInfoMap, and use that for allocation. CompAllocator ialloc(getAllocator(CMK_ArrayInfoMap)); compRoot->m_memorySsaMap[memoryKind] = new (ialloc) NodeToUnsignedMap(ialloc); } return compRoot->m_memorySsaMap[memoryKind]; } // The Refany type is the only struct type whose structure is implicitly assumed by IL. We need its fields. 
CORINFO_CLASS_HANDLE m_refAnyClass; CORINFO_FIELD_HANDLE GetRefanyDataField() { if (m_refAnyClass == nullptr) { m_refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF); } return info.compCompHnd->getFieldInClass(m_refAnyClass, 0); } CORINFO_FIELD_HANDLE GetRefanyTypeField() { if (m_refAnyClass == nullptr) { m_refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF); } return info.compCompHnd->getFieldInClass(m_refAnyClass, 1); } #if VARSET_COUNTOPS static BitSetSupport::BitSetOpCounter m_varsetOpCounter; #endif #if ALLVARSET_COUNTOPS static BitSetSupport::BitSetOpCounter m_allvarsetOpCounter; #endif static HelperCallProperties s_helperCallProperties; #ifdef UNIX_AMD64_ABI static var_types GetTypeFromClassificationAndSizes(SystemVClassificationType classType, int size); static var_types GetEightByteType(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, unsigned slotNum); static void GetStructTypeOffset(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, var_types* type0, var_types* type1, unsigned __int8* offset0, unsigned __int8* offset1); void GetStructTypeOffset(CORINFO_CLASS_HANDLE typeHnd, var_types* type0, var_types* type1, unsigned __int8* offset0, unsigned __int8* offset1); #endif // defined(UNIX_AMD64_ABI) void fgMorphMultiregStructArgs(GenTreeCall* call); GenTree* fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntryPtr); bool killGCRefs(GenTree* tree); }; // end of class Compiler //--------------------------------------------------------------------------------------------------------------------- // GenTreeVisitor: a flexible tree walker implemented using the curiously-recurring-template pattern. // // This class implements a configurable walker for IR trees. There are five configuration options (defaults values are // shown in parentheses): // // - ComputeStack (false): when true, the walker will push each node onto the `m_ancestors` stack. "Ancestors" is a bit // of a misnomer, as the first entry will always be the current node. // // - DoPreOrder (false): when true, the walker will invoke `TVisitor::PreOrderVisit` with the current node as an // argument before visiting the node's operands. // // - DoPostOrder (false): when true, the walker will invoke `TVisitor::PostOrderVisit` with the current node as an // argument after visiting the node's operands. // // - DoLclVarsOnly (false): when true, the walker will only invoke `TVisitor::PreOrderVisit` for lclVar nodes. // `DoPreOrder` must be true if this option is true. // // - UseExecutionOrder (false): when true, then walker will visit a node's operands in execution order (e.g. if a // binary operator has the `GTF_REVERSE_OPS` flag set, the second operand will be // visited before the first). // // At least one of `DoPreOrder` and `DoPostOrder` must be specified. 
// // A simple pre-order visitor might look something like the following: // // class CountingVisitor final : public GenTreeVisitor<CountingVisitor> // { // public: // enum // { // DoPreOrder = true // }; // // unsigned m_count; // // CountingVisitor(Compiler* compiler) // : GenTreeVisitor<CountingVisitor>(compiler), m_count(0) // { // } // // Compiler::fgWalkResult PreOrderVisit(GenTree* node) // { // m_count++; // } // }; // // This visitor would then be used like so: // // CountingVisitor countingVisitor(compiler); // countingVisitor.WalkTree(root); // template <typename TVisitor> class GenTreeVisitor { protected: typedef Compiler::fgWalkResult fgWalkResult; enum { ComputeStack = false, DoPreOrder = false, DoPostOrder = false, DoLclVarsOnly = false, UseExecutionOrder = false, }; Compiler* m_compiler; ArrayStack<GenTree*> m_ancestors; GenTreeVisitor(Compiler* compiler) : m_compiler(compiler), m_ancestors(compiler->getAllocator(CMK_ArrayStack)) { assert(compiler != nullptr); static_assert_no_msg(TVisitor::DoPreOrder || TVisitor::DoPostOrder); static_assert_no_msg(!TVisitor::DoLclVarsOnly || TVisitor::DoPreOrder); } fgWalkResult PreOrderVisit(GenTree** use, GenTree* user) { return fgWalkResult::WALK_CONTINUE; } fgWalkResult PostOrderVisit(GenTree** use, GenTree* user) { return fgWalkResult::WALK_CONTINUE; } public: fgWalkResult WalkTree(GenTree** use, GenTree* user) { assert(use != nullptr); GenTree* node = *use; if (TVisitor::ComputeStack) { m_ancestors.Push(node); } fgWalkResult result = fgWalkResult::WALK_CONTINUE; if (TVisitor::DoPreOrder && !TVisitor::DoLclVarsOnly) { result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user); if (result == fgWalkResult::WALK_ABORT) { return result; } node = *use; if ((node == nullptr) || (result == fgWalkResult::WALK_SKIP_SUBTREES)) { goto DONE; } } switch (node->OperGet()) { // Leaf lclVars case GT_LCL_VAR: case GT_LCL_FLD: case GT_LCL_VAR_ADDR: case GT_LCL_FLD_ADDR: if (TVisitor::DoLclVarsOnly) { result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user); if (result == fgWalkResult::WALK_ABORT) { return result; } } FALLTHROUGH; // Leaf nodes case GT_CATCH_ARG: case GT_LABEL: case GT_FTN_ADDR: case GT_RET_EXPR: case GT_CNS_INT: case GT_CNS_LNG: case GT_CNS_DBL: case GT_CNS_STR: case GT_MEMORYBARRIER: case GT_JMP: case GT_JCC: case GT_SETCC: case GT_NO_OP: case GT_START_NONGC: case GT_START_PREEMPTGC: case GT_PROF_HOOK: #if !defined(FEATURE_EH_FUNCLETS) case GT_END_LFIN: #endif // !FEATURE_EH_FUNCLETS case GT_PHI_ARG: case GT_JMPTABLE: case GT_CLS_VAR: case GT_CLS_VAR_ADDR: case GT_ARGPLACE: case GT_PHYSREG: case GT_EMITNOP: case GT_PINVOKE_PROLOG: case GT_PINVOKE_EPILOG: case GT_IL_OFFSET: break; // Lclvar unary operators case GT_STORE_LCL_VAR: case GT_STORE_LCL_FLD: if (TVisitor::DoLclVarsOnly) { result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user); if (result == fgWalkResult::WALK_ABORT) { return result; } } FALLTHROUGH; // Standard unary operators case GT_NOT: case GT_NEG: case GT_BSWAP: case GT_BSWAP16: case GT_COPY: case GT_RELOAD: case GT_ARR_LENGTH: case GT_CAST: case GT_BITCAST: case GT_CKFINITE: case GT_LCLHEAP: case GT_ADDR: case GT_IND: case GT_OBJ: case GT_BLK: case GT_BOX: case GT_ALLOCOBJ: case GT_INIT_VAL: case GT_JTRUE: case GT_SWITCH: case GT_NULLCHECK: case GT_PUTARG_REG: case GT_PUTARG_STK: case GT_PUTARG_TYPE: case GT_RETURNTRAP: case GT_NOP: case GT_FIELD: case GT_RETURN: case GT_RETFILT: case GT_RUNTIMELOOKUP: case GT_KEEPALIVE: case GT_INC_SATURATE: { GenTreeUnOp* const unOp = 
node->AsUnOp(); if (unOp->gtOp1 != nullptr) { result = WalkTree(&unOp->gtOp1, unOp); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } // Special nodes case GT_PHI: for (GenTreePhi::Use& use : node->AsPhi()->Uses()) { result = WalkTree(&use.NodeRef(), node); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; case GT_FIELD_LIST: for (GenTreeFieldList::Use& use : node->AsFieldList()->Uses()) { result = WalkTree(&use.NodeRef(), node); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; case GT_CMPXCHG: { GenTreeCmpXchg* const cmpXchg = node->AsCmpXchg(); result = WalkTree(&cmpXchg->gtOpLocation, cmpXchg); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&cmpXchg->gtOpValue, cmpXchg); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&cmpXchg->gtOpComparand, cmpXchg); if (result == fgWalkResult::WALK_ABORT) { return result; } break; } case GT_ARR_ELEM: { GenTreeArrElem* const arrElem = node->AsArrElem(); result = WalkTree(&arrElem->gtArrObj, arrElem); if (result == fgWalkResult::WALK_ABORT) { return result; } const unsigned rank = arrElem->gtArrRank; for (unsigned dim = 0; dim < rank; dim++) { result = WalkTree(&arrElem->gtArrInds[dim], arrElem); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } case GT_ARR_OFFSET: { GenTreeArrOffs* const arrOffs = node->AsArrOffs(); result = WalkTree(&arrOffs->gtOffset, arrOffs); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&arrOffs->gtIndex, arrOffs); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&arrOffs->gtArrObj, arrOffs); if (result == fgWalkResult::WALK_ABORT) { return result; } break; } case GT_STORE_DYN_BLK: { GenTreeStoreDynBlk* const dynBlock = node->AsStoreDynBlk(); GenTree** op1Use = &dynBlock->gtOp1; GenTree** op2Use = &dynBlock->gtOp2; GenTree** op3Use = &dynBlock->gtDynamicSize; result = WalkTree(op1Use, dynBlock); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(op2Use, dynBlock); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(op3Use, dynBlock); if (result == fgWalkResult::WALK_ABORT) { return result; } break; } case GT_CALL: { GenTreeCall* const call = node->AsCall(); if (call->gtCallThisArg != nullptr) { result = WalkTree(&call->gtCallThisArg->NodeRef(), call); if (result == fgWalkResult::WALK_ABORT) { return result; } } for (GenTreeCall::Use& use : call->Args()) { result = WalkTree(&use.NodeRef(), call); if (result == fgWalkResult::WALK_ABORT) { return result; } } for (GenTreeCall::Use& use : call->LateArgs()) { result = WalkTree(&use.NodeRef(), call); if (result == fgWalkResult::WALK_ABORT) { return result; } } if (call->gtCallType == CT_INDIRECT) { if (call->gtCallCookie != nullptr) { result = WalkTree(&call->gtCallCookie, call); if (result == fgWalkResult::WALK_ABORT) { return result; } } result = WalkTree(&call->gtCallAddr, call); if (result == fgWalkResult::WALK_ABORT) { return result; } } if (call->gtControlExpr != nullptr) { result = WalkTree(&call->gtControlExpr, call); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } #if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) #if defined(FEATURE_SIMD) case GT_SIMD: #endif #if defined(FEATURE_HW_INTRINSICS) case GT_HWINTRINSIC: #endif if (TVisitor::UseExecutionOrder && node->IsReverseOp()) { assert(node->AsMultiOp()->GetOperandCount() == 2); result = WalkTree(&node->AsMultiOp()->Op(2), node); if (result == 
fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&node->AsMultiOp()->Op(1), node); if (result == fgWalkResult::WALK_ABORT) { return result; } } else { for (GenTree** use : node->AsMultiOp()->UseEdges()) { result = WalkTree(use, node); if (result == fgWalkResult::WALK_ABORT) { return result; } } } break; #endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) // Binary nodes default: { assert(node->OperIsBinary()); GenTreeOp* const op = node->AsOp(); GenTree** op1Use = &op->gtOp1; GenTree** op2Use = &op->gtOp2; if (TVisitor::UseExecutionOrder && node->IsReverseOp()) { std::swap(op1Use, op2Use); } if (*op1Use != nullptr) { result = WalkTree(op1Use, op); if (result == fgWalkResult::WALK_ABORT) { return result; } } if (*op2Use != nullptr) { result = WalkTree(op2Use, op); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } } DONE: // Finally, visit the current node if (TVisitor::DoPostOrder) { result = reinterpret_cast<TVisitor*>(this)->PostOrderVisit(use, user); } if (TVisitor::ComputeStack) { m_ancestors.Pop(); } return result; } }; template <bool computeStack, bool doPreOrder, bool doPostOrder, bool doLclVarsOnly, bool useExecutionOrder> class GenericTreeWalker final : public GenTreeVisitor<GenericTreeWalker<computeStack, doPreOrder, doPostOrder, doLclVarsOnly, useExecutionOrder>> { public: enum { ComputeStack = computeStack, DoPreOrder = doPreOrder, DoPostOrder = doPostOrder, DoLclVarsOnly = doLclVarsOnly, UseExecutionOrder = useExecutionOrder, }; private: Compiler::fgWalkData* m_walkData; public: GenericTreeWalker(Compiler::fgWalkData* walkData) : GenTreeVisitor<GenericTreeWalker<computeStack, doPreOrder, doPostOrder, doLclVarsOnly, useExecutionOrder>>( walkData->compiler) , m_walkData(walkData) { assert(walkData != nullptr); if (computeStack) { walkData->parentStack = &this->m_ancestors; } } Compiler::fgWalkResult PreOrderVisit(GenTree** use, GenTree* user) { m_walkData->parent = user; return m_walkData->wtprVisitorFn(use, m_walkData); } Compiler::fgWalkResult PostOrderVisit(GenTree** use, GenTree* user) { m_walkData->parent = user; return m_walkData->wtpoVisitorFn(use, m_walkData); } }; // A dominator tree visitor implemented using the curiously-recurring-template pattern, similar to GenTreeVisitor. template <typename TVisitor> class DomTreeVisitor { protected: Compiler* const m_compiler; DomTreeNode* const m_domTree; DomTreeVisitor(Compiler* compiler, DomTreeNode* domTree) : m_compiler(compiler), m_domTree(domTree) { } void Begin() { } void PreOrderVisit(BasicBlock* block) { } void PostOrderVisit(BasicBlock* block) { } void End() { } public: //------------------------------------------------------------------------ // WalkTree: Walk the dominator tree, starting from fgFirstBB. // // Notes: // This performs a non-recursive, non-allocating walk of the tree by using // DomTreeNode's firstChild and nextSibling links to locate the children of // a node and BasicBlock's bbIDom parent link to go back up the tree when // no more children are left. // // Forests are also supported, provided that all the roots are chained via // DomTreeNode::nextSibling to fgFirstBB. 
// void WalkTree() { static_cast<TVisitor*>(this)->Begin(); for (BasicBlock *next, *block = m_compiler->fgFirstBB; block != nullptr; block = next) { static_cast<TVisitor*>(this)->PreOrderVisit(block); next = m_domTree[block->bbNum].firstChild; if (next != nullptr) { assert(next->bbIDom == block); continue; } do { static_cast<TVisitor*>(this)->PostOrderVisit(block); next = m_domTree[block->bbNum].nextSibling; if (next != nullptr) { assert(next->bbIDom == block->bbIDom); break; } block = block->bbIDom; } while (block != nullptr); } static_cast<TVisitor*>(this)->End(); } }; // EHClauses: adapter class for forward iteration of the exception handling table using range-based `for`, e.g.: // for (EHblkDsc* const ehDsc : EHClauses(compiler)) // class EHClauses { EHblkDsc* m_begin; EHblkDsc* m_end; // Forward iterator for the exception handling table entries. Iteration is in table order. // class iterator { EHblkDsc* m_ehDsc; public: iterator(EHblkDsc* ehDsc) : m_ehDsc(ehDsc) { } EHblkDsc* operator*() const { return m_ehDsc; } iterator& operator++() { ++m_ehDsc; return *this; } bool operator!=(const iterator& i) const { return m_ehDsc != i.m_ehDsc; } }; public: EHClauses(Compiler* comp) : m_begin(comp->compHndBBtab), m_end(comp->compHndBBtab + comp->compHndBBtabCount) { assert((m_begin != nullptr) || (m_begin == m_end)); } iterator begin() const { return iterator(m_begin); } iterator end() const { return iterator(m_end); } }; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Miscellaneous Compiler stuff XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ // Values used to mark the types a stack slot is used for const unsigned TYPE_REF_INT = 0x01; // slot used as a 32-bit int const unsigned TYPE_REF_LNG = 0x02; // slot used as a 64-bit long const unsigned TYPE_REF_FLT = 0x04; // slot used as a 32-bit float const unsigned TYPE_REF_DBL = 0x08; // slot used as a 64-bit float const unsigned TYPE_REF_PTR = 0x10; // slot used as a 32-bit pointer const unsigned TYPE_REF_BYR = 0x20; // slot used as a byref pointer const unsigned TYPE_REF_STC = 0x40; // slot used as a struct const unsigned TYPE_REF_TYPEMASK = 0x7F; // bits that represent the type // const unsigned TYPE_REF_ADDR_TAKEN = 0x80; // slots address was taken /***************************************************************************** * * Variables to keep track of total code amounts. */ #if DISPLAY_SIZES extern size_t grossVMsize; extern size_t grossNCsize; extern size_t totalNCsize; extern unsigned genMethodICnt; extern unsigned genMethodNCnt; extern size_t gcHeaderISize; extern size_t gcPtrMapISize; extern size_t gcHeaderNSize; extern size_t gcPtrMapNSize; #endif // DISPLAY_SIZES /***************************************************************************** * * Variables to keep track of basic block counts (more data on 1 BB methods) */ #if COUNT_BASIC_BLOCKS extern Histogram bbCntTable; extern Histogram bbOneBBSizeTable; #endif /***************************************************************************** * * Used by optFindNaturalLoops to gather statistical information such as * - total number of natural loops * - number of loops with 1, 2, ... 
exit conditions * - number of loops that have an iterator (for like) * - number of loops that have a constant iterator */ #if COUNT_LOOPS extern unsigned totalLoopMethods; // counts the total number of methods that have natural loops extern unsigned maxLoopsPerMethod; // counts the maximum number of loops a method has extern unsigned totalLoopOverflows; // # of methods that identified more loops than we can represent extern unsigned totalLoopCount; // counts the total number of natural loops extern unsigned totalUnnatLoopCount; // counts the total number of (not-necessarily natural) loops extern unsigned totalUnnatLoopOverflows; // # of methods that identified more unnatural loops than we can represent extern unsigned iterLoopCount; // counts the # of loops with an iterator (for like) extern unsigned simpleTestLoopCount; // counts the # of loops with an iterator and a simple loop condition (iter < // const) extern unsigned constIterLoopCount; // counts the # of loops with a constant iterator (for like) extern bool hasMethodLoops; // flag to keep track if we already counted a method as having loops extern unsigned loopsThisMethod; // counts the number of loops in the current method extern bool loopOverflowThisMethod; // True if we exceeded the max # of loops in the method. extern Histogram loopCountTable; // Histogram of loop counts extern Histogram loopExitCountTable; // Histogram of loop exit counts #endif // COUNT_LOOPS /***************************************************************************** * variables to keep track of how many iterations we go in a dataflow pass */ #if DATAFLOW_ITER extern unsigned CSEiterCount; // counts the # of iteration for the CSE dataflow extern unsigned CFiterCount; // counts the # of iteration for the Const Folding dataflow #endif // DATAFLOW_ITER #if MEASURE_BLOCK_SIZE extern size_t genFlowNodeSize; extern size_t genFlowNodeCnt; #endif // MEASURE_BLOCK_SIZE #if MEASURE_NODE_SIZE struct NodeSizeStats { void Init() { genTreeNodeCnt = 0; genTreeNodeSize = 0; genTreeNodeActualSize = 0; } // Count of tree nodes allocated. unsigned __int64 genTreeNodeCnt; // The size we allocate. unsigned __int64 genTreeNodeSize; // The actual size of the node. Note that the actual size will likely be smaller // than the allocated size, but we sometimes use SetOper()/ChangeOper() to change // a smaller node to a larger one. TODO-Cleanup: add stats on // SetOper()/ChangeOper() usage to quantify this. unsigned __int64 genTreeNodeActualSize; }; extern NodeSizeStats genNodeSizeStats; // Total node size stats extern NodeSizeStats genNodeSizeStatsPerFunc; // Per-function node size stats extern Histogram genTreeNcntHist; extern Histogram genTreeNsizHist; #endif // MEASURE_NODE_SIZE /***************************************************************************** * Count fatal errors (including noway_asserts). 
*/ #if MEASURE_FATAL extern unsigned fatal_badCode; extern unsigned fatal_noWay; extern unsigned fatal_implLimitation; extern unsigned fatal_NOMEM; extern unsigned fatal_noWayAssertBody; #ifdef DEBUG extern unsigned fatal_noWayAssertBodyArgs; #endif // DEBUG extern unsigned fatal_NYI; #endif // MEASURE_FATAL /***************************************************************************** * Codegen */ #ifdef TARGET_XARCH const instruction INS_SHIFT_LEFT_LOGICAL = INS_shl; const instruction INS_SHIFT_RIGHT_LOGICAL = INS_shr; const instruction INS_SHIFT_RIGHT_ARITHM = INS_sar; const instruction INS_AND = INS_and; const instruction INS_OR = INS_or; const instruction INS_XOR = INS_xor; const instruction INS_NEG = INS_neg; const instruction INS_TEST = INS_test; const instruction INS_MUL = INS_imul; const instruction INS_SIGNED_DIVIDE = INS_idiv; const instruction INS_UNSIGNED_DIVIDE = INS_div; const instruction INS_BREAKPOINT = INS_int3; const instruction INS_ADDC = INS_adc; const instruction INS_SUBC = INS_sbb; const instruction INS_NOT = INS_not; #endif // TARGET_XARCH #ifdef TARGET_ARM const instruction INS_SHIFT_LEFT_LOGICAL = INS_lsl; const instruction INS_SHIFT_RIGHT_LOGICAL = INS_lsr; const instruction INS_SHIFT_RIGHT_ARITHM = INS_asr; const instruction INS_AND = INS_and; const instruction INS_OR = INS_orr; const instruction INS_XOR = INS_eor; const instruction INS_NEG = INS_rsb; const instruction INS_TEST = INS_tst; const instruction INS_MUL = INS_mul; const instruction INS_MULADD = INS_mla; const instruction INS_SIGNED_DIVIDE = INS_sdiv; const instruction INS_UNSIGNED_DIVIDE = INS_udiv; const instruction INS_BREAKPOINT = INS_bkpt; const instruction INS_ADDC = INS_adc; const instruction INS_SUBC = INS_sbc; const instruction INS_NOT = INS_mvn; const instruction INS_ABS = INS_vabs; const instruction INS_SQRT = INS_vsqrt; #endif // TARGET_ARM #ifdef TARGET_ARM64 const instruction INS_MULADD = INS_madd; inline const instruction INS_BREAKPOINT_osHelper() { // GDB needs the encoding of brk #0 // Windbg needs the encoding of brk #F000 return TargetOS::IsUnix ? INS_brk_unix : INS_brk_windows; } #define INS_BREAKPOINT INS_BREAKPOINT_osHelper() const instruction INS_ABS = INS_fabs; const instruction INS_SQRT = INS_fsqrt; #endif // TARGET_ARM64 /*****************************************************************************/ extern const BYTE genTypeSizes[]; extern const BYTE genTypeAlignments[]; extern const BYTE genTypeStSzs[]; extern const BYTE genActualTypes[]; /*****************************************************************************/ #ifdef DEBUG void dumpConvertedVarSet(Compiler* comp, VARSET_VALARG_TP vars); #endif // DEBUG #include "compiler.hpp" // All the shared inline functions /*****************************************************************************/ #endif //_COMPILER_H_ /*****************************************************************************/
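/*****************************************************************************
 * Illustrative sketch (not part of the JIT sources): the DomTreeVisitor class
 * declared earlier in this header is documented but, unlike GenTreeVisitor,
 * has no usage example. A minimal subclass might look like the following; the
 * class and variable names are purely hypothetical, and "domTree" is assumed
 * to be whatever DomTreeNode array the caller has already built (e.g. by the
 * dominator computation).
 *
 *     class BlockCountingDomVisitor final : public DomTreeVisitor<BlockCountingDomVisitor>
 *     {
 *     public:
 *         unsigned m_count;
 *
 *         BlockCountingDomVisitor(Compiler* compiler, DomTreeNode* domTree)
 *             : DomTreeVisitor<BlockCountingDomVisitor>(compiler, domTree), m_count(0)
 *         {
 *         }
 *
 *         void PreOrderVisit(BasicBlock* block)
 *         {
 *             m_count++;
 *         }
 *     };
 *
 *     // Used like so, assuming "domTree" and "compiler" are in scope:
 *     //     BlockCountingDomVisitor visitor(compiler, domTree);
 *     //     visitor.WalkTree();
 *****************************************************************************/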
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Compiler XX XX XX XX Represents the method data we are currently JIT-compiling. XX XX An instance of this class is created for every method we JIT. XX XX This contains all the info needed for the method. So allocating a XX XX a new instance per method makes it thread-safe. XX XX It should be used to do all the memory management for the compiler run. XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ #ifndef _COMPILER_H_ #define _COMPILER_H_ /*****************************************************************************/ #include "jit.h" #include "opcode.h" #include "varset.h" #include "jitstd.h" #include "jithashtable.h" #include "gentree.h" #include "debuginfo.h" #include "lir.h" #include "block.h" #include "inline.h" #include "jiteh.h" #include "instr.h" #include "regalloc.h" #include "sm.h" #include "cycletimer.h" #include "blockset.h" #include "arraystack.h" #include "hashbv.h" #include "jitexpandarray.h" #include "tinyarray.h" #include "valuenum.h" #include "jittelemetry.h" #include "namedintrinsiclist.h" #ifdef LATE_DISASM #include "disasm.h" #endif #include "codegeninterface.h" #include "regset.h" #include "jitgcinfo.h" #if DUMP_GC_TABLES && defined(JIT32_GCENCODER) #include "gcdump.h" #endif #include "emit.h" #include "hwintrinsic.h" #include "simd.h" #include "simdashwintrinsic.h" // This is only used locally in the JIT to indicate that // a verification block should be inserted #define SEH_VERIFICATION_EXCEPTION 0xe0564552 // VER /***************************************************************************** * Forward declarations */ struct InfoHdr; // defined in GCInfo.h struct escapeMapping_t; // defined in fgdiagnostic.cpp class emitter; // defined in emit.h struct ShadowParamVarInfo; // defined in GSChecks.cpp struct InitVarDscInfo; // defined in register_arg_convention.h class FgStack; // defined in fgbasic.cpp class Instrumentor; // defined in fgprofile.cpp class SpanningTreeVisitor; // defined in fgprofile.cpp class CSE_DataFlow; // defined in OptCSE.cpp class OptBoolsDsc; // defined in optimizer.cpp #ifdef DEBUG struct IndentStack; #endif class Lowering; // defined in lower.h // The following are defined in this file, Compiler.h class Compiler; /***************************************************************************** * Unwind info */ #include "unwind.h" /*****************************************************************************/ // // Declare global operator new overloads that use the compiler's arena allocator // // I wanted to make the second argument optional, with default = CMK_Unknown, but that // caused these to be ambiguous with the global placement new operators. void* __cdecl operator new(size_t n, Compiler* context, CompMemKind cmk); void* __cdecl operator new[](size_t n, Compiler* context, CompMemKind cmk); void* __cdecl operator new(size_t n, void* p, const jitstd::placement_t& syntax_difference); // Requires the definitions of "operator new" so including "LoopCloning.h" after the definitions. 
#include "loopcloning.h" /*****************************************************************************/ /* This is included here and not earlier as it needs the definition of "CSE" * which is defined in the section above */ /*****************************************************************************/ unsigned genLog2(unsigned value); unsigned genLog2(unsigned __int64 value); unsigned ReinterpretHexAsDecimal(unsigned in); /*****************************************************************************/ const unsigned FLG_CCTOR = (CORINFO_FLG_CONSTRUCTOR | CORINFO_FLG_STATIC); #ifdef DEBUG const int BAD_STK_OFFS = 0xBAADF00D; // for LclVarDsc::lvStkOffs #endif //------------------------------------------------------------------------ // HFA info shared by LclVarDsc and fgArgTabEntry //------------------------------------------------------------------------ inline bool IsHfa(CorInfoHFAElemType kind) { return kind != CORINFO_HFA_ELEM_NONE; } inline var_types HfaTypeFromElemKind(CorInfoHFAElemType kind) { switch (kind) { case CORINFO_HFA_ELEM_FLOAT: return TYP_FLOAT; case CORINFO_HFA_ELEM_DOUBLE: return TYP_DOUBLE; #ifdef FEATURE_SIMD case CORINFO_HFA_ELEM_VECTOR64: return TYP_SIMD8; case CORINFO_HFA_ELEM_VECTOR128: return TYP_SIMD16; #endif case CORINFO_HFA_ELEM_NONE: return TYP_UNDEF; default: assert(!"Invalid HfaElemKind"); return TYP_UNDEF; } } inline CorInfoHFAElemType HfaElemKindFromType(var_types type) { switch (type) { case TYP_FLOAT: return CORINFO_HFA_ELEM_FLOAT; case TYP_DOUBLE: return CORINFO_HFA_ELEM_DOUBLE; #ifdef FEATURE_SIMD case TYP_SIMD8: return CORINFO_HFA_ELEM_VECTOR64; case TYP_SIMD16: return CORINFO_HFA_ELEM_VECTOR128; #endif case TYP_UNDEF: return CORINFO_HFA_ELEM_NONE; default: assert(!"Invalid HFA Type"); return CORINFO_HFA_ELEM_NONE; } } // The following holds the Local var info (scope information) typedef const char* VarName; // Actual ASCII string struct VarScopeDsc { unsigned vsdVarNum; // (remapped) LclVarDsc number unsigned vsdLVnum; // 'which' in eeGetLVinfo(). // Also, it is the index of this entry in the info.compVarScopes array, // which is useful since the array is also accessed via the // compEnterScopeList and compExitScopeList sorted arrays. IL_OFFSET vsdLifeBeg; // instr offset of beg of life IL_OFFSET vsdLifeEnd; // instr offset of end of life #ifdef DEBUG VarName vsdName; // name of the var #endif }; // This class stores information associated with a LclVar SSA definition. class LclSsaVarDsc { // The basic block where the definition occurs. Definitions of uninitialized variables // are considered to occur at the start of the first basic block (fgFirstBB). // // TODO-Cleanup: In the case of uninitialized variables the block is set to nullptr by // SsaBuilder and changed to fgFirstBB during value numbering. It would be useful to // investigate and perhaps eliminate this rather unexpected behavior. BasicBlock* m_block; // The GT_ASG node that generates the definition, or nullptr for definitions // of uninitialized variables. 
GenTreeOp* m_asg; public: LclSsaVarDsc() : m_block(nullptr), m_asg(nullptr) { } LclSsaVarDsc(BasicBlock* block) : m_block(block), m_asg(nullptr) { } LclSsaVarDsc(BasicBlock* block, GenTreeOp* asg) : m_block(block), m_asg(asg) { assert((asg == nullptr) || asg->OperIs(GT_ASG)); } BasicBlock* GetBlock() const { return m_block; } void SetBlock(BasicBlock* block) { m_block = block; } GenTreeOp* GetAssignment() const { return m_asg; } void SetAssignment(GenTreeOp* asg) { assert((asg == nullptr) || asg->OperIs(GT_ASG)); m_asg = asg; } ValueNumPair m_vnPair; }; // This class stores information associated with a memory SSA definition. class SsaMemDef { public: ValueNumPair m_vnPair; }; //------------------------------------------------------------------------ // SsaDefArray: A resizable array of SSA definitions. // // Unlike an ordinary resizable array implementation, this allows only element // addition (by calling AllocSsaNum) and has special handling for RESERVED_SSA_NUM // (basically it's a 1-based array). The array doesn't impose any particular // requirements on the elements it stores and AllocSsaNum forwards its arguments // to the array element constructor, this way the array supports both LclSsaVarDsc // and SsaMemDef elements. // template <typename T> class SsaDefArray { T* m_array; unsigned m_arraySize; unsigned m_count; static_assert_no_msg(SsaConfig::RESERVED_SSA_NUM == 0); static_assert_no_msg(SsaConfig::FIRST_SSA_NUM == 1); // Get the minimum valid SSA number. unsigned GetMinSsaNum() const { return SsaConfig::FIRST_SSA_NUM; } // Increase (double) the size of the array. void GrowArray(CompAllocator alloc) { unsigned oldSize = m_arraySize; unsigned newSize = max(2, oldSize * 2); T* newArray = alloc.allocate<T>(newSize); for (unsigned i = 0; i < oldSize; i++) { newArray[i] = m_array[i]; } m_array = newArray; m_arraySize = newSize; } public: // Construct an empty SsaDefArray. SsaDefArray() : m_array(nullptr), m_arraySize(0), m_count(0) { } // Reset the array (used only if the SSA form is reconstructed). void Reset() { m_count = 0; } // Allocate a new SSA number (starting with SsaConfig::FIRST_SSA_NUM). template <class... Args> unsigned AllocSsaNum(CompAllocator alloc, Args&&... args) { if (m_count == m_arraySize) { GrowArray(alloc); } unsigned ssaNum = GetMinSsaNum() + m_count; m_array[m_count++] = T(std::forward<Args>(args)...); // Ensure that the first SSA number we allocate is SsaConfig::FIRST_SSA_NUM assert((ssaNum == SsaConfig::FIRST_SSA_NUM) || (m_count > 1)); return ssaNum; } // Get the number of SSA definitions in the array. unsigned GetCount() const { return m_count; } // Get a pointer to the SSA definition at the specified index. T* GetSsaDefByIndex(unsigned index) { assert(index < m_count); return &m_array[index]; } // Check if the specified SSA number is valid. bool IsValidSsaNum(unsigned ssaNum) const { return (GetMinSsaNum() <= ssaNum) && (ssaNum < (GetMinSsaNum() + m_count)); } // Get a pointer to the SSA definition associated with the specified SSA number. T* GetSsaDef(unsigned ssaNum) { assert(ssaNum != SsaConfig::RESERVED_SSA_NUM); return GetSsaDefByIndex(ssaNum - GetMinSsaNum()); } // Get an SSA number associated with the specified SSA def (that must be in this array). 
unsigned GetSsaNum(T* ssaDef) { assert((m_array <= ssaDef) && (ssaDef < &m_array[m_count])); return GetMinSsaNum() + static_cast<unsigned>(ssaDef - &m_array[0]); } }; enum RefCountState { RCS_INVALID, // not valid to get/set ref counts RCS_EARLY, // early counts for struct promotion and struct passing RCS_NORMAL, // normal ref counts (from lvaMarkRefs onward) }; #ifdef DEBUG // Reasons why we can't enregister a local. enum class DoNotEnregisterReason { None, AddrExposed, // the address of this local is exposed. DontEnregStructs, // struct enregistration is disabled. NotRegSizeStruct, // the struct size does not much any register size, usually the struct size is too big. LocalField, // the local is accessed with LCL_FLD, note we can do it not only for struct locals. VMNeedsStackAddr, LiveInOutOfHandler, // the local is alive in and out of exception handler and not signle def. BlockOp, // Is read or written via a block operation. IsStructArg, // Is a struct passed as an argument in a way that requires a stack location. DepField, // It is a field of a dependently promoted struct NoRegVars, // opts.compFlags & CLFLG_REGVAR is not set MinOptsGC, // It is a GC Ref and we are compiling MinOpts #if !defined(TARGET_64BIT) LongParamField, // It is a decomposed field of a long parameter. #endif #ifdef JIT32_GCENCODER PinningRef, #endif LclAddrNode, // the local is accessed with LCL_ADDR_VAR/FLD. CastTakesAddr, StoreBlkSrc, // the local is used as STORE_BLK source. OneAsgRetyping, // fgMorphOneAsgBlockOp prevents this local from being enregister. SwizzleArg, // the local is passed using LCL_FLD as another type. BlockOpRet, // the struct is returned and it promoted or there is a cast. ReturnSpCheck, // the local is used to do SP check SimdUserForcesDep, // a promoted struct was used by a SIMD/HWI node; it must be dependently promoted HiddenBufferStructArg // the argument is a hidden return buffer passed to a method. }; enum class AddressExposedReason { NONE, PARENT_EXPOSED, // This is a promoted field but the parent is exposed. TOO_CONSERVATIVE, // Were marked as exposed to be conservative, fix these places. ESCAPE_ADDRESS, // The address is escaping, for example, passed as call argument. WIDE_INDIR, // We access via indirection with wider type. OSR_EXPOSED, // It was exposed in the original method, osr has to repeat it. STRESS_LCL_FLD, // Stress mode replaces localVar with localFld and makes them addrExposed. COPY_FLD_BY_FLD, // Field by field copy takes the address of the local, can be fixed. DISPATCH_RET_BUF // Caller return buffer dispatch. }; #endif // DEBUG class LclVarDsc { public: // The constructor. Most things can just be zero'ed. // // Initialize the ArgRegs to REG_STK. // Morph will update if this local is passed in a register. LclVarDsc() : _lvArgReg(REG_STK) , #if FEATURE_MULTIREG_ARGS _lvOtherArgReg(REG_STK) , #endif // FEATURE_MULTIREG_ARGS lvClassHnd(NO_CLASS_HANDLE) , lvRefBlks(BlockSetOps::UninitVal()) , lvPerSsaData() { } // note this only packs because var_types is a typedef of unsigned char var_types lvType : 5; // TYP_INT/LONG/FLOAT/DOUBLE/REF unsigned char lvIsParam : 1; // is this a parameter? unsigned char lvIsRegArg : 1; // is this an argument that was passed by register? unsigned char lvFramePointerBased : 1; // 0 = off of REG_SPBASE (e.g., ESP), 1 = off of REG_FPBASE (e.g., EBP) unsigned char lvOnFrame : 1; // (part of) the variable lives on the frame unsigned char lvRegister : 1; // assigned to live in a register? 
For RyuJIT backend, this is only set if the // variable is in the same register for the entire function. unsigned char lvTracked : 1; // is this a tracked variable? bool lvTrackedNonStruct() { return lvTracked && lvType != TYP_STRUCT; } unsigned char lvPinned : 1; // is this a pinned variable? unsigned char lvMustInit : 1; // must be initialized private: bool m_addrExposed : 1; // The address of this variable is "exposed" -- passed as an argument, stored in a // global location, etc. // We cannot reason reliably about the value of the variable. public: unsigned char lvDoNotEnregister : 1; // Do not enregister this variable. unsigned char lvFieldAccessed : 1; // The var is a struct local, and a field of the variable is accessed. Affects // struct promotion. unsigned char lvLiveInOutOfHndlr : 1; // The variable is live in or out of an exception handler, and therefore must // be on the stack (at least at those boundaries.) unsigned char lvInSsa : 1; // The variable is in SSA form (set by SsaBuilder) unsigned char lvIsCSE : 1; // Indicates if this LclVar is a CSE variable. unsigned char lvHasLdAddrOp : 1; // has ldloca or ldarga opcode on this local. unsigned char lvStackByref : 1; // This is a compiler temporary of TYP_BYREF that is known to point into our local // stack frame. unsigned char lvHasILStoreOp : 1; // there is at least one STLOC or STARG on this local unsigned char lvHasMultipleILStoreOp : 1; // there is more than one STLOC on this local unsigned char lvIsTemp : 1; // Short-lifetime compiler temp #if defined(TARGET_AMD64) || defined(TARGET_ARM64) unsigned char lvIsImplicitByRef : 1; // Set if the argument is an implicit byref. #endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) unsigned char lvIsBoolean : 1; // set if variable is boolean unsigned char lvSingleDef : 1; // variable has a single def // before lvaMarkLocalVars: identifies ref type locals that can get type updates // after lvaMarkLocalVars: identifies locals that are suitable for optAddCopies unsigned char lvSingleDefRegCandidate : 1; // variable has a single def and hence is a register candidate // Currently, this is only used to decide if an EH variable can be // a register candidate or not. unsigned char lvDisqualifySingleDefRegCandidate : 1; // tracks variables that are disqualified from register // candidacy unsigned char lvSpillAtSingleDef : 1; // variable has a single def (as determined by LSRA interval scan) // and is spilled making it a candidate to spill right after the // first (and only) definition. // Note: We cannot reuse lvSingleDefRegCandidate because it is set // in an earlier phase and the information might not be appropriate // in LSRA. unsigned char lvDisqualify : 1; // variable is no longer OK for add copy optimization unsigned char lvVolatileHint : 1; // hint for AssertionProp #ifndef TARGET_64BIT unsigned char lvStructDoubleAlign : 1; // Must we double align this struct? #endif // !TARGET_64BIT #ifdef TARGET_64BIT unsigned char lvQuirkToLong : 1; // Quirk to allocate this LclVar as a 64-bit long #endif #ifdef DEBUG unsigned char lvKeepType : 1; // Don't change the type of this variable unsigned char lvNoLclFldStress : 1; // Can't apply local field stress on this one #endif unsigned char lvIsPtr : 1; // Might this be used in an address computation? (used by buffer overflow security // checks) unsigned char lvIsUnsafeBuffer : 1; // Does this contain an unsafe buffer requiring buffer overflow security checks?
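// Note on the single-def flags above (an illustrative summary of a hypothetical scenario, not a statement about any particular method): a local with exactly one store that is live across an exception handler can still be a register candidate via lvSingleDefRegCandidate; lvSpillAtSingleDef then allows LSRA to spill it once, right after that single store, so the stack home stays up to date at EH boundaries instead of the local living on the stack everywhere.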
unsigned char lvPromoted : 1; // True when this local is a promoted struct, a normed struct, or a "split" long on a // 32-bit target. For implicit byref parameters, this gets hijacked between // fgRetypeImplicitByRefArgs and fgMarkDemotedImplicitByRefArgs to indicate whether // references to the arg are being rewritten as references to a promoted shadow local. unsigned char lvIsStructField : 1; // Is this local var a field of a promoted struct local? unsigned char lvOverlappingFields : 1; // True when we have a struct with possibly overlapping fields unsigned char lvContainsHoles : 1; // True when we have a promoted struct that contains holes unsigned char lvCustomLayout : 1; // True when this struct has "CustomLayout" unsigned char lvIsMultiRegArg : 1; // true if this is a multireg LclVar struct used in an argument context unsigned char lvIsMultiRegRet : 1; // true if this is a multireg LclVar struct assigned from a multireg call #ifdef DEBUG unsigned char lvHiddenBufferStructArg : 1; // True when this struct (or its field) are passed as hidden buffer // pointer. #endif #ifdef FEATURE_HFA_FIELDS_PRESENT CorInfoHFAElemType _lvHfaElemKind : 3; // What kind of an HFA this is (CORINFO_HFA_ELEM_NONE if it is not an HFA). #endif // FEATURE_HFA_FIELDS_PRESENT #ifdef DEBUG // TODO-Cleanup: See the note on lvSize() - this flag is only in use by asserts that are checking for struct // types, and is needed because of cases where TYP_STRUCT is bashed to an integral type. // Consider cleaning this up so this workaround is not required. unsigned char lvUnusedStruct : 1; // All references to this promoted struct are through its field locals. // I.e. there is no longer any reference to the struct directly. // In this case we can simply remove this struct local. unsigned char lvUndoneStructPromotion : 1; // The struct promotion was undone and hence there should be no // reference to the fields of this struct. #endif unsigned char lvLRACandidate : 1; // Tracked for linear scan register allocation purposes #ifdef FEATURE_SIMD // Note that both SIMD vector args and locals are marked as lvSIMDType = true, but the // type of an arg node is TYP_BYREF and a local node is TYP_SIMD*. unsigned char lvSIMDType : 1; // This is a SIMD struct unsigned char lvUsedInSIMDIntrinsic : 1; // This tells lclvar is used for simd intrinsic unsigned char lvSimdBaseJitType : 5; // Note: this only packs because CorInfoType has less than 32 entries CorInfoType GetSimdBaseJitType() const { return (CorInfoType)lvSimdBaseJitType; } void SetSimdBaseJitType(CorInfoType simdBaseJitType) { assert(simdBaseJitType < (1 << 5)); lvSimdBaseJitType = (unsigned char)simdBaseJitType; } var_types GetSimdBaseType() const; #endif // FEATURE_SIMD unsigned char lvRegStruct : 1; // This is a reg-sized non-field-addressed struct. unsigned char lvClassIsExact : 1; // lvClassHandle is the exact type #ifdef DEBUG unsigned char lvClassInfoUpdated : 1; // true if this var has updated class handle or exactness #endif unsigned char lvImplicitlyReferenced : 1; // true if there are non-IR references to this local (prolog, epilog, gc, // eh) unsigned char lvSuppressedZeroInit : 1; // local needs zero init if we transform tail call to loop unsigned char lvHasExplicitInit : 1; // The local is explicitly initialized and doesn't need zero initialization in // the prolog. If the local has gc pointers, there are no gc-safe points // between the prolog and the explicit initialization. 
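// Illustrative example of the promotion bookkeeping kept in the fields below (hypothetical local numbers, not taken from any particular method): if V01 is a promoted 'struct { int a; int b; }' whose fields become V02 and V03, then V01 has lvPromoted = 1, lvFieldCnt = 2 and lvFieldLclStart = 2 (the first field local), while V02 and V03 have lvIsStructField = 1, lvParentLcl = 1, and lvFldOffset of 0 and 4 respectively.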
union { unsigned lvFieldLclStart; // The index of the local var representing the first field in the promoted struct // local. For implicit byref parameters, this gets hijacked between // fgRetypeImplicitByRefArgs and fgMarkDemotedImplicitByRefArgs to point to the // struct local created to model the parameter's struct promotion, if any. unsigned lvParentLcl; // The index of the local var representing the parent (i.e. the promoted struct local). // Valid on promoted struct local fields. }; unsigned char lvFieldCnt; // Number of fields in the promoted VarDsc. unsigned char lvFldOffset; unsigned char lvFldOrdinal; #ifdef DEBUG unsigned char lvSingleDefDisqualifyReason = 'H'; #endif #if FEATURE_MULTIREG_ARGS regNumber lvRegNumForSlot(unsigned slotNum) { if (slotNum == 0) { return (regNumber)_lvArgReg; } else if (slotNum == 1) { return GetOtherArgReg(); } else { assert(false && "Invalid slotNum!"); } unreached(); } #endif // FEATURE_MULTIREG_ARGS CorInfoHFAElemType GetLvHfaElemKind() const { #ifdef FEATURE_HFA_FIELDS_PRESENT return _lvHfaElemKind; #else NOWAY_MSG("GetLvHfaElemKind"); return CORINFO_HFA_ELEM_NONE; #endif // FEATURE_HFA_FIELDS_PRESENT } void SetLvHfaElemKind(CorInfoHFAElemType elemKind) { #ifdef FEATURE_HFA_FIELDS_PRESENT _lvHfaElemKind = elemKind; #else NOWAY_MSG("SetLvHfaElemKind"); #endif // FEATURE_HFA_FIELDS_PRESENT } bool lvIsHfa() const { if (GlobalJitOptions::compFeatureHfa) { return IsHfa(GetLvHfaElemKind()); } else { return false; } } bool lvIsHfaRegArg() const { if (GlobalJitOptions::compFeatureHfa) { return lvIsRegArg && lvIsHfa(); } else { return false; } } //------------------------------------------------------------------------------ // lvHfaSlots: Get the number of slots used by an HFA local // // Return Value: // On Arm64 - Returns 1-4 indicating the number of register slots used by the HFA // On Arm32 - Returns the total number of single FP register slots used by the HFA, max is 8 // unsigned lvHfaSlots() const { assert(lvIsHfa()); assert(varTypeIsStruct(lvType)); unsigned slots = 0; #ifdef TARGET_ARM slots = lvExactSize / sizeof(float); assert(slots <= 8); #elif defined(TARGET_ARM64) switch (GetLvHfaElemKind()) { case CORINFO_HFA_ELEM_NONE: assert(!"lvHfaSlots called for non-HFA"); break; case CORINFO_HFA_ELEM_FLOAT: assert((lvExactSize % 4) == 0); slots = lvExactSize >> 2; break; case CORINFO_HFA_ELEM_DOUBLE: case CORINFO_HFA_ELEM_VECTOR64: assert((lvExactSize % 8) == 0); slots = lvExactSize >> 3; break; case CORINFO_HFA_ELEM_VECTOR128: assert((lvExactSize % 16) == 0); slots = lvExactSize >> 4; break; default: unreached(); } assert(slots <= 4); #endif // TARGET_ARM64 return slots; } // lvIsMultiRegArgOrRet() // returns true if this is a multireg LclVar struct used in an argument context // or if this is a multireg LclVar struct assigned from a multireg call bool lvIsMultiRegArgOrRet() { return lvIsMultiRegArg || lvIsMultiRegRet; } #if defined(DEBUG) private: DoNotEnregisterReason m_doNotEnregReason; AddressExposedReason m_addrExposedReason; public: void SetDoNotEnregReason(DoNotEnregisterReason reason) { m_doNotEnregReason = reason; } DoNotEnregisterReason GetDoNotEnregReason() const { return m_doNotEnregReason; } AddressExposedReason GetAddrExposedReason() const { return m_addrExposedReason; } #endif // DEBUG public: void SetAddressExposed(bool value DEBUGARG(AddressExposedReason reason)) { m_addrExposed = value; INDEBUG(m_addrExposedReason = reason); } void CleanAddressExposed() { m_addrExposed = false; } bool IsAddressExposed() const { return 
m_addrExposed; } #ifdef DEBUG void SetHiddenBufferStructArg(char value) { lvHiddenBufferStructArg = value; } bool IsHiddenBufferStructArg() const { return lvHiddenBufferStructArg; } #endif private: regNumberSmall _lvRegNum; // Used to store the register this variable is in (or, the low register of a // register pair). It is set during codegen any time the // variable is enregistered (lvRegister is only set // to non-zero if the variable gets the same register assignment for its entire // lifetime). #if !defined(TARGET_64BIT) regNumberSmall _lvOtherReg; // Used for "upper half" of long var. #endif // !defined(TARGET_64BIT) regNumberSmall _lvArgReg; // The (first) register in which this argument is passed. #if FEATURE_MULTIREG_ARGS regNumberSmall _lvOtherArgReg; // Used for the second part of the struct passed in a register. // Note this is defined but not used by ARM32 #endif // FEATURE_MULTIREG_ARGS regNumberSmall _lvArgInitReg; // the register into which the argument is moved at entry public: // The register number is stored in a small format (8 bits), but the getters return and the setters take // a full-size (unsigned) format, to localize the casts here. ///////////////////// regNumber GetRegNum() const { return (regNumber)_lvRegNum; } void SetRegNum(regNumber reg) { _lvRegNum = (regNumberSmall)reg; assert(_lvRegNum == reg); } ///////////////////// #if defined(TARGET_64BIT) regNumber GetOtherReg() const { assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072 // "unreachable code" warnings return REG_NA; } void SetOtherReg(regNumber reg) { assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072 // "unreachable code" warnings } #else // !TARGET_64BIT regNumber GetOtherReg() const { return (regNumber)_lvOtherReg; } void SetOtherReg(regNumber reg) { _lvOtherReg = (regNumberSmall)reg; assert(_lvOtherReg == reg); } #endif // !TARGET_64BIT ///////////////////// regNumber GetArgReg() const { return (regNumber)_lvArgReg; } void SetArgReg(regNumber reg) { _lvArgReg = (regNumberSmall)reg; assert(_lvArgReg == reg); } #if FEATURE_MULTIREG_ARGS regNumber GetOtherArgReg() const { return (regNumber)_lvOtherArgReg; } void SetOtherArgReg(regNumber reg) { _lvOtherArgReg = (regNumberSmall)reg; assert(_lvOtherArgReg == reg); } #endif // FEATURE_MULTIREG_ARGS #ifdef FEATURE_SIMD // Is this is a SIMD struct? bool lvIsSIMDType() const { return lvSIMDType; } // Is this is a SIMD struct which is used for SIMD intrinsic? bool lvIsUsedInSIMDIntrinsic() const { return lvUsedInSIMDIntrinsic; } #else // If feature_simd not enabled, return false bool lvIsSIMDType() const { return false; } bool lvIsUsedInSIMDIntrinsic() const { return false; } #endif ///////////////////// regNumber GetArgInitReg() const { return (regNumber)_lvArgInitReg; } void SetArgInitReg(regNumber reg) { _lvArgInitReg = (regNumberSmall)reg; assert(_lvArgInitReg == reg); } ///////////////////// bool lvIsRegCandidate() const { return lvLRACandidate != 0; } bool lvIsInReg() const { return lvIsRegCandidate() && (GetRegNum() != REG_STK); } regMaskTP lvRegMask() const { regMaskTP regMask = RBM_NONE; if (varTypeUsesFloatReg(TypeGet())) { if (GetRegNum() != REG_STK) { regMask = genRegMaskFloat(GetRegNum(), TypeGet()); } } else { if (GetRegNum() != REG_STK) { regMask = genRegMask(GetRegNum()); } } return regMask; } unsigned short lvVarIndex; // variable tracking index private: unsigned short m_lvRefCnt; // unweighted (real) reference count. 
For implicit by reference // parameters, this gets hijacked from fgResetImplicitByRefRefCount // through fgMarkDemotedImplicitByRefArgs, to provide a static // appearance count (computed during address-exposed analysis) // that fgMakeOutgoingStructArgCopy consults during global morph // to determine if eliding its copy is legal. weight_t m_lvRefCntWtd; // weighted reference count public: unsigned short lvRefCnt(RefCountState state = RCS_NORMAL) const; void incLvRefCnt(unsigned short delta, RefCountState state = RCS_NORMAL); void setLvRefCnt(unsigned short newValue, RefCountState state = RCS_NORMAL); weight_t lvRefCntWtd(RefCountState state = RCS_NORMAL) const; void incLvRefCntWtd(weight_t delta, RefCountState state = RCS_NORMAL); void setLvRefCntWtd(weight_t newValue, RefCountState state = RCS_NORMAL); private: int lvStkOffs; // stack offset of home in bytes. public: int GetStackOffset() const { return lvStkOffs; } void SetStackOffset(int offset) { lvStkOffs = offset; } unsigned lvExactSize; // (exact) size of the type in bytes // Is this a promoted struct? // This method returns true only for structs (including SIMD structs), not for // locals that are split on a 32-bit target. // It is only necessary to use this: // 1) if only structs are wanted, and // 2) if Lowering has already been done. // Otherwise lvPromoted is valid. bool lvPromotedStruct() { #if !defined(TARGET_64BIT) return (lvPromoted && !varTypeIsLong(lvType)); #else // defined(TARGET_64BIT) return lvPromoted; #endif // defined(TARGET_64BIT) } unsigned lvSize() const; size_t lvArgStackSize() const; unsigned lvSlotNum; // original slot # (if remapped) typeInfo lvVerTypeInfo; // type info needed for verification // class handle for the local or null if not known or not a class, // for a struct handle use `GetStructHnd()`. CORINFO_CLASS_HANDLE lvClassHnd; // Get class handle for a struct local or implicitByRef struct local. CORINFO_CLASS_HANDLE GetStructHnd() const { #ifdef FEATURE_SIMD if (lvSIMDType && (m_layout == nullptr)) { return NO_CLASS_HANDLE; } #endif assert(m_layout != nullptr); #if defined(TARGET_AMD64) || defined(TARGET_ARM64) assert(varTypeIsStruct(TypeGet()) || (lvIsImplicitByRef && (TypeGet() == TYP_BYREF))); #else assert(varTypeIsStruct(TypeGet())); #endif CORINFO_CLASS_HANDLE structHnd = m_layout->GetClassHandle(); assert(structHnd != NO_CLASS_HANDLE); return structHnd; } CORINFO_FIELD_HANDLE lvFieldHnd; // field handle for promoted struct fields private: ClassLayout* m_layout; // layout info for structs public: BlockSet lvRefBlks; // Set of blocks that contain refs Statement* lvDefStmt; // Pointer to the statement with the single definition void lvaDisqualifyVar(); // Call to disqualify a local variable from use in optAddCopies var_types TypeGet() const { return (var_types)lvType; } bool lvStackAligned() const { assert(lvIsStructField); return ((lvFldOffset % TARGET_POINTER_SIZE) == 0); } bool lvNormalizeOnLoad() const { return varTypeIsSmall(TypeGet()) && // lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore. (lvIsParam || m_addrExposed || lvIsStructField); } bool lvNormalizeOnStore() const { return varTypeIsSmall(TypeGet()) && // lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore. 
!(lvIsParam || m_addrExposed || lvIsStructField); } void incRefCnts(weight_t weight, Compiler* pComp, RefCountState state = RCS_NORMAL, bool propagate = true); var_types GetHfaType() const { if (GlobalJitOptions::compFeatureHfa) { assert(lvIsHfa()); return HfaTypeFromElemKind(GetLvHfaElemKind()); } else { return TYP_UNDEF; } } void SetHfaType(var_types type) { if (GlobalJitOptions::compFeatureHfa) { CorInfoHFAElemType elemKind = HfaElemKindFromType(type); SetLvHfaElemKind(elemKind); // Ensure we've allocated enough bits. assert(GetLvHfaElemKind() == elemKind); } } // Returns true if this variable contains GC pointers (including being a GC pointer itself). bool HasGCPtr() const { return varTypeIsGC(lvType) || ((lvType == TYP_STRUCT) && m_layout->HasGCPtr()); } // Returns the layout of a struct variable. ClassLayout* GetLayout() const { assert(varTypeIsStruct(lvType)); return m_layout; } // Sets the layout of a struct variable. void SetLayout(ClassLayout* layout) { assert(varTypeIsStruct(lvType)); assert((m_layout == nullptr) || ClassLayout::AreCompatible(m_layout, layout)); m_layout = layout; } SsaDefArray<LclSsaVarDsc> lvPerSsaData; // Returns the address of the per-Ssa data for the given ssaNum (which is required // not to be the SsaConfig::RESERVED_SSA_NUM, which indicates that the variable is // not an SSA variable). LclSsaVarDsc* GetPerSsaData(unsigned ssaNum) { return lvPerSsaData.GetSsaDef(ssaNum); } // Returns the SSA number for "ssaDef". Requires "ssaDef" to be a valid definition // of this variable. unsigned GetSsaNumForSsaDef(LclSsaVarDsc* ssaDef) { return lvPerSsaData.GetSsaNum(ssaDef); } var_types GetRegisterType(const GenTreeLclVarCommon* tree) const; var_types GetRegisterType() const; var_types GetActualRegisterType() const; bool IsEnregisterableType() const { return GetRegisterType() != TYP_UNDEF; } bool IsEnregisterableLcl() const { if (lvDoNotEnregister) { return false; } return IsEnregisterableType(); } //----------------------------------------------------------------------------- // IsAlwaysAliveInMemory: Determines if this variable's value is always // up-to-date on stack. This is possible if this is an EH-var or // we decided to spill after single-def. // bool IsAlwaysAliveInMemory() const { return lvLiveInOutOfHndlr || lvSpillAtSingleDef; } bool CanBeReplacedWithItsField(Compiler* comp) const; #ifdef DEBUG public: const char* lvReason; void PrintVarReg() const { printf("%s", getRegName(GetRegNum())); } #endif // DEBUG }; // class LclVarDsc enum class SymbolicIntegerValue : int32_t { LongMin, IntMin, ShortMin, ByteMin, Zero, One, ByteMax, UByteMax, ShortMax, UShortMax, IntMax, UIntMax, LongMax, }; inline constexpr bool operator>(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) > static_cast<int32_t>(right); } inline constexpr bool operator>=(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) >= static_cast<int32_t>(right); } inline constexpr bool operator<(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) < static_cast<int32_t>(right); } inline constexpr bool operator<=(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) <= static_cast<int32_t>(right); } // Represents an integral range useful for reasoning about integral casts. // It uses a symbolic representation for lower and upper bounds so // that it can efficiently handle integers of all sizes on all hosts. 
// // Note that the ranges represented by this class are **always** in the // "signed" domain. This is so that if we know the range a node produces, it // can be trivially used to determine if a cast above the node does or does not // overflow, which requires that the interpretation of integers be the same both // for the "input" and "output". We choose signed interpretation here because it // produces nice continuous ranges and because IR uses sign-extension for constants. // // Some examples of how ranges are computed for casts: // 1. CAST_OVF(ubyte <- uint): does not overflow for [0..UBYTE_MAX], produces the // same range - all casts that do not change the representation, i. e. have the same // "actual" input and output type, have the same "input" and "output" range. // 2. CAST_OVF(ulong <- uint): never oveflows => the "input" range is [INT_MIN..INT_MAX] // (aka all possible 32 bit integers). Produces [0..UINT_MAX] (aka all possible 32 // bit integers zero-extended to 64 bits). // 3. CAST_OVF(int <- uint): overflows for inputs larger than INT_MAX <=> less than 0 // when interpreting as signed => the "input" range is [0..INT_MAX], the same range // being the produced one as the node does not change the width of the integer. // class IntegralRange { private: SymbolicIntegerValue m_lowerBound; SymbolicIntegerValue m_upperBound; public: IntegralRange() = default; IntegralRange(SymbolicIntegerValue lowerBound, SymbolicIntegerValue upperBound) : m_lowerBound(lowerBound), m_upperBound(upperBound) { assert(lowerBound <= upperBound); } bool Contains(int64_t value) const; bool Contains(IntegralRange other) const { return (m_lowerBound <= other.m_lowerBound) && (other.m_upperBound <= m_upperBound); } bool IsPositive() { return m_lowerBound >= SymbolicIntegerValue::Zero; } bool Equals(IntegralRange other) const { return (m_lowerBound == other.m_lowerBound) && (m_upperBound == other.m_upperBound); } static int64_t SymbolicToRealValue(SymbolicIntegerValue value); static SymbolicIntegerValue LowerBoundForType(var_types type); static SymbolicIntegerValue UpperBoundForType(var_types type); static IntegralRange ForType(var_types type) { return {LowerBoundForType(type), UpperBoundForType(type)}; } static IntegralRange ForNode(GenTree* node, Compiler* compiler); static IntegralRange ForCastInput(GenTreeCast* cast); static IntegralRange ForCastOutput(GenTreeCast* cast); #ifdef DEBUG static void Print(IntegralRange range); #endif // DEBUG }; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX TempsInfo XX XX XX XX The temporary lclVars allocated by the compiler for code generation XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /***************************************************************************** * * The following keeps track of temporaries allocated in the stack frame * during code-generation (after register allocation). These spill-temps are * only used if we run out of registers while evaluating a tree. * * These are different from the more common temps allocated by lvaGrabTemp(). 
*/ class TempDsc { public: TempDsc* tdNext; private: int tdOffs; #ifdef DEBUG static const int BAD_TEMP_OFFSET = 0xDDDDDDDD; // used as a sentinel "bad value" for tdOffs in DEBUG #endif // DEBUG int tdNum; BYTE tdSize; var_types tdType; public: TempDsc(int _tdNum, unsigned _tdSize, var_types _tdType) : tdNum(_tdNum), tdSize((BYTE)_tdSize), tdType(_tdType) { #ifdef DEBUG // temps must have a negative number (so they have a different number from all local variables) assert(tdNum < 0); tdOffs = BAD_TEMP_OFFSET; #endif // DEBUG if (tdNum != _tdNum) { IMPL_LIMITATION("too many spill temps"); } } #ifdef DEBUG bool tdLegalOffset() const { return tdOffs != BAD_TEMP_OFFSET; } #endif // DEBUG int tdTempOffs() const { assert(tdLegalOffset()); return tdOffs; } void tdSetTempOffs(int offs) { tdOffs = offs; assert(tdLegalOffset()); } void tdAdjustTempOffs(int offs) { tdOffs += offs; assert(tdLegalOffset()); } int tdTempNum() const { assert(tdNum < 0); return tdNum; } unsigned tdTempSize() const { return tdSize; } var_types tdTempType() const { return tdType; } }; // interface to hide linearscan implementation from rest of compiler class LinearScanInterface { public: virtual void doLinearScan() = 0; virtual void recordVarLocationsAtStartOfBB(BasicBlock* bb) = 0; virtual bool willEnregisterLocalVars() const = 0; #if TRACK_LSRA_STATS virtual void dumpLsraStatsCsv(FILE* file) = 0; virtual void dumpLsraStatsSummary(FILE* file) = 0; #endif // TRACK_LSRA_STATS }; LinearScanInterface* getLinearScanAllocator(Compiler* comp); // Information about arrays: their element type and size, and the offset of the first element. // We label GT_IND's that are array indices with GTF_IND_ARR_INDEX, and, for such nodes, // associate an array info via the map retrieved by GetArrayInfoMap(). This information is used, // for example, in value numbering of array index expressions. struct ArrayInfo { var_types m_elemType; CORINFO_CLASS_HANDLE m_elemStructType; unsigned m_elemSize; unsigned m_elemOffset; ArrayInfo() : m_elemType(TYP_UNDEF), m_elemStructType(nullptr), m_elemSize(0), m_elemOffset(0) { } ArrayInfo(var_types elemType, unsigned elemSize, unsigned elemOffset, CORINFO_CLASS_HANDLE elemStructType) : m_elemType(elemType), m_elemStructType(elemStructType), m_elemSize(elemSize), m_elemOffset(elemOffset) { } }; // This enumeration names the phases into which we divide compilation. The phases should completely // partition a compilation. enum Phases { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) enum_nm, #include "compphases.h" PHASE_NUMBER_OF }; extern const char* PhaseNames[]; extern const char* PhaseEnums[]; extern const LPCWSTR PhaseShortNames[]; // Specify which checks should be run after each phase // enum class PhaseChecks { CHECK_NONE, CHECK_ALL }; // Specify compiler data that a phase might modify enum class PhaseStatus : unsigned { MODIFIED_NOTHING, MODIFIED_EVERYTHING }; // The following enum provides a simple 1:1 mapping to CLR API's enum API_ICorJitInfo_Names { #define DEF_CLR_API(name) API_##name, #include "ICorJitInfo_API_names.h" API_COUNT }; //--------------------------------------------------------------- // Compilation time. // // A "CompTimeInfo" is a structure for tracking the compilation time of one or more methods. // We divide a compilation into a sequence of contiguous phases, and track the total (per-thread) cycles // of the compilation, as well as the cycles for each phase. We also track the number of bytecodes. 
// If there is a failure in reading a timer at any point, the "CompTimeInfo" becomes invalid, as indicated // by "m_timerFailure" being true. // If FEATURE_JIT_METHOD_PERF is not set, we define a minimal form of this, enough to let other code compile. struct CompTimeInfo { #ifdef FEATURE_JIT_METHOD_PERF // The string names of the phases. static const char* PhaseNames[]; static bool PhaseHasChildren[]; static int PhaseParent[]; static bool PhaseReportsIRSize[]; unsigned m_byteCodeBytes; unsigned __int64 m_totalCycles; unsigned __int64 m_invokesByPhase[PHASE_NUMBER_OF]; unsigned __int64 m_cyclesByPhase[PHASE_NUMBER_OF]; #if MEASURE_CLRAPI_CALLS unsigned __int64 m_CLRinvokesByPhase[PHASE_NUMBER_OF]; unsigned __int64 m_CLRcyclesByPhase[PHASE_NUMBER_OF]; #endif unsigned m_nodeCountAfterPhase[PHASE_NUMBER_OF]; // For better documentation, we call EndPhase on // non-leaf phases. We should also call EndPhase on the // last leaf subphase; obviously, the elapsed cycles between the EndPhase // for the last leaf subphase and the EndPhase for an ancestor should be very small. // We add all such "redundant end phase" intervals to this variable below; we print // it out in a report, so we can verify that it is, indeed, very small. If it ever // isn't, this means that we're doing something significant between the end of the last // declared subphase and the end of its parent. unsigned __int64 m_parentPhaseEndSlop; bool m_timerFailure; #if MEASURE_CLRAPI_CALLS // The following measures the time spent inside each individual CLR API call. unsigned m_allClrAPIcalls; unsigned m_perClrAPIcalls[API_ICorJitInfo_Names::API_COUNT]; unsigned __int64 m_allClrAPIcycles; unsigned __int64 m_perClrAPIcycles[API_ICorJitInfo_Names::API_COUNT]; unsigned __int32 m_maxClrAPIcycles[API_ICorJitInfo_Names::API_COUNT]; #endif // MEASURE_CLRAPI_CALLS CompTimeInfo(unsigned byteCodeBytes); #endif }; #ifdef FEATURE_JIT_METHOD_PERF #if MEASURE_CLRAPI_CALLS struct WrapICorJitInfo; #endif // This class summarizes the JIT time information over the course of a run: the number of methods compiled, // and the total and maximum timings. (These are instances of the "CompTimeInfo" type described above). // The operation of adding a single method's timing to the summary may be performed concurrently by several // threads, so it is protected by a lock. // This class is intended to be used as a singleton type, with only a single instance. class CompTimeSummaryInfo { // This lock protects the fields of all CompTimeSummaryInfo(s) (of which we expect there to be one). static CritSecObject s_compTimeSummaryLock; int m_numMethods; int m_totMethods; CompTimeInfo m_total; CompTimeInfo m_maximum; int m_numFilteredMethods; CompTimeInfo m_filtered; // This can use what ever data you want to determine if the value to be added // belongs in the filtered section (it's always included in the unfiltered section) bool IncludedInFilteredData(CompTimeInfo& info); public: // This is the unique CompTimeSummaryInfo object for this instance of the runtime. static CompTimeSummaryInfo s_compTimeSummary; CompTimeSummaryInfo() : m_numMethods(0), m_totMethods(0), m_total(0), m_maximum(0), m_numFilteredMethods(0), m_filtered(0) { } // Assumes that "info" is a completed CompTimeInfo for a compilation; adds it to the summary. // This is thread safe. void AddInfo(CompTimeInfo& info, bool includePhases); // Print the summary information to "f". // This is not thread-safe; assumed to be called by only one thread. 
void Print(FILE* f); }; // A JitTimer encapsulates a CompTimeInfo for a single compilation. It also tracks the start of compilation, // and when the current phase started. This is intended to be part of a Compilation object. // class JitTimer { unsigned __int64 m_start; // Start of the compilation. unsigned __int64 m_curPhaseStart; // Start of the current phase. #if MEASURE_CLRAPI_CALLS unsigned __int64 m_CLRcallStart; // Start of the current CLR API call (if any). unsigned __int64 m_CLRcallInvokes; // CLR API invokes under current outer so far unsigned __int64 m_CLRcallCycles; // CLR API cycles under current outer so far. int m_CLRcallAPInum; // The enum/index of the current CLR API call (or -1). static double s_cyclesPerSec; // Cached for speedier measurements #endif #ifdef DEBUG Phases m_lastPhase; // The last phase that was completed (or (Phases)-1 to start). #endif CompTimeInfo m_info; // The CompTimeInfo for this compilation. static CritSecObject s_csvLock; // Lock to protect the time log file. static FILE* s_csvFile; // The time log file handle. void PrintCsvMethodStats(Compiler* comp); private: void* operator new(size_t); void* operator new[](size_t); void operator delete(void*); void operator delete[](void*); public: // Initialized the timer instance JitTimer(unsigned byteCodeSize); static JitTimer* Create(Compiler* comp, unsigned byteCodeSize) { return ::new (comp, CMK_Unknown) JitTimer(byteCodeSize); } static void PrintCsvHeader(); // Ends the current phase (argument is for a redundant check). void EndPhase(Compiler* compiler, Phases phase); #if MEASURE_CLRAPI_CALLS // Start and end a timed CLR API call. void CLRApiCallEnter(unsigned apix); void CLRApiCallLeave(unsigned apix); #endif // MEASURE_CLRAPI_CALLS // Completes the timing of the current method, which is assumed to have "byteCodeBytes" bytes of bytecode, // and adds it to "sum". void Terminate(Compiler* comp, CompTimeSummaryInfo& sum, bool includePhases); // Attempts to query the cycle counter of the current thread. If successful, returns "true" and sets // *cycles to the cycle counter value. Otherwise, returns false and sets the "m_timerFailure" flag of // "m_info" to true. bool GetThreadCycles(unsigned __int64* cycles) { bool res = CycleTimer::GetThreadCyclesS(cycles); if (!res) { m_info.m_timerFailure = true; } return res; } static void Shutdown(); }; #endif // FEATURE_JIT_METHOD_PERF //------------------- Function/Funclet info ------------------------------- enum FuncKind : BYTE { FUNC_ROOT, // The main/root function (always id==0) FUNC_HANDLER, // a funclet associated with an EH handler (finally, fault, catch, filter handler) FUNC_FILTER, // a funclet associated with an EH filter FUNC_COUNT }; class emitLocation; struct FuncInfoDsc { FuncKind funKind; BYTE funFlags; // Currently unused, just here for padding unsigned short funEHIndex; // index, into the ebd table, of innermost EH clause corresponding to this // funclet. It is only valid if funKind field indicates this is a // EH-related funclet: FUNC_HANDLER or FUNC_FILTER #if defined(TARGET_AMD64) // TODO-AMD64-Throughput: make the AMD64 info more like the ARM info to avoid having this large static array. emitLocation* startLoc; emitLocation* endLoc; emitLocation* coldStartLoc; // locations for the cold section, if there is one. emitLocation* coldEndLoc; UNWIND_INFO unwindHeader; // Maximum of 255 UNWIND_CODE 'nodes' and then the unwind header. If there are an odd // number of codes, the VM or Zapper will 4-byte align the whole thing. 
BYTE unwindCodes[offsetof(UNWIND_INFO, UnwindCode) + (0xFF * sizeof(UNWIND_CODE))]; unsigned unwindCodeSlot; #elif defined(TARGET_X86) emitLocation* startLoc; emitLocation* endLoc; emitLocation* coldStartLoc; // locations for the cold section, if there is one. emitLocation* coldEndLoc; #elif defined(TARGET_ARMARCH) UnwindInfo uwi; // Unwind information for this function/funclet's hot section UnwindInfo* uwiCold; // Unwind information for this function/funclet's cold section // Note: we only have a pointer here instead of the actual object, // to save memory in the JIT case (compared to the NGEN case), // where we don't have any cold section. // Note 2: we currently don't support hot/cold splitting in functions // with EH, so uwiCold will be NULL for all funclets. emitLocation* startLoc; emitLocation* endLoc; emitLocation* coldStartLoc; // locations for the cold section, if there is one. emitLocation* coldEndLoc; #endif // TARGET_ARMARCH #if defined(FEATURE_CFI_SUPPORT) jitstd::vector<CFI_CODE>* cfiCodes; #endif // FEATURE_CFI_SUPPORT // Eventually we may want to move rsModifiedRegsMask, lvaOutgoingArgSize, and anything else // that isn't shared between the main function body and funclets. }; struct fgArgTabEntry { GenTreeCall::Use* use; // Points to the argument's GenTreeCall::Use in gtCallArgs or gtCallThisArg. GenTreeCall::Use* lateUse; // Points to the argument's GenTreeCall::Use in gtCallLateArgs, if any. // Get the node that corresponds to this argument entry. // This is the "real" node and not a placeholder or setup node. GenTree* GetNode() const { return lateUse == nullptr ? use->GetNode() : lateUse->GetNode(); } unsigned argNum; // The original argument number, also specifies the required argument evaluation order from the IL private: regNumberSmall regNums[MAX_ARG_REG_COUNT]; // The registers to use when passing this argument, set to REG_STK for // arguments passed on the stack public: unsigned numRegs; // Count of number of registers that this argument uses. // Note that on ARM, if we have a double hfa, this reflects the number // of DOUBLE registers. #if defined(UNIX_AMD64_ABI) // Unix amd64 will split floating point types and integer types in structs // between floating point and general purpose registers. Keep track of that // information so we do not need to recompute it later. unsigned structIntRegs; unsigned structFloatRegs; #endif // UNIX_AMD64_ABI #if defined(DEBUG_ARG_SLOTS) // These fields were used to calculate stack size in stack slots for arguments // but now they are replaced by precise `m_byteOffset/m_byteSize` because of // arm64 apple abi requirements. // A slot is a pointer sized region in the OutArg area. unsigned slotNum; // When an argument is passed in the OutArg area this is the slot number in the OutArg area unsigned numSlots; // Count of number of slots that this argument uses #endif // DEBUG_ARG_SLOTS // Return number of stack slots that this argument is taking. // TODO-Cleanup: this function does not align with arm64 apple model, // delete it. In most cases we just want to know if it is using stack or not // but in some cases we are checking if it is a multireg arg, like: // `numRegs + GetStackSlotsNumber() > 1` that is harder to replace. // unsigned GetStackSlotsNumber() const { return roundUp(GetStackByteSize(), TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE; } private: unsigned _lateArgInx; // index into gtCallLateArgs list; UINT_MAX if this is not a late arg.
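// Illustrative example of how the register/stack sizing members of this entry relate (hypothetical argument, not from any particular call): on arm32, a 24-byte struct split between r2, r3 and the stack would have numRegs = 2, _isSplit = true, and GetStackByteSize() == 24 - 2 * TARGET_POINTER_SIZE == 16 bytes placed in the OutArg area, whereas an argument passed entirely in registers reports GetStackByteSize() == 0.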
public: unsigned tmpNum; // the LclVar number if we had to force evaluation of this arg var_types argType; // The type used to pass this argument. This is generally the original argument type, but when a // struct is passed as a scalar type, this is that type. // Note that if a struct is passed by reference, this will still be the struct type. bool needTmp : 1; // True when we force this argument's evaluation into a temp LclVar bool needPlace : 1; // True when we must replace this argument with a placeholder node bool isTmp : 1; // True when we set up a temp LclVar for this argument due to size issues with the struct bool processed : 1; // True when we have decided the evaluation order for this argument in the gtCallLateArgs bool isBackFilled : 1; // True when the argument fills a register slot skipped due to alignment requirements of // previous arguments. NonStandardArgKind nonStandardArgKind : 4; // The non-standard arg kind. Non-standard args are args that are forced // to be in certain registers or on the stack, regardless of where they // appear in the arg list. bool isStruct : 1; // True if this is a struct arg bool _isVararg : 1; // True if the argument is in a vararg context. bool passedByRef : 1; // True iff the argument is passed by reference. #if FEATURE_ARG_SPLIT bool _isSplit : 1; // True when this argument is split between the registers and OutArg area #endif // FEATURE_ARG_SPLIT #ifdef FEATURE_HFA_FIELDS_PRESENT CorInfoHFAElemType _hfaElemKind : 3; // What kind of an HFA this is (CORINFO_HFA_ELEM_NONE if it is not an HFA). #endif CorInfoHFAElemType GetHfaElemKind() const { #ifdef FEATURE_HFA_FIELDS_PRESENT return _hfaElemKind; #else NOWAY_MSG("GetHfaElemKind"); return CORINFO_HFA_ELEM_NONE; #endif } void SetHfaElemKind(CorInfoHFAElemType elemKind) { #ifdef FEATURE_HFA_FIELDS_PRESENT _hfaElemKind = elemKind; #else NOWAY_MSG("SetHfaElemKind"); #endif } bool isNonStandard() const { return nonStandardArgKind != NonStandardArgKind::None; } // Returns true if the IR node for this non-standard arg is added by fgInitArgInfo. // In this case, it must be removed by GenTreeCall::ResetArgInfo.
bool isNonStandardArgAddedLate() const { switch (static_cast<NonStandardArgKind>(nonStandardArgKind)) { case NonStandardArgKind::None: case NonStandardArgKind::PInvokeFrame: case NonStandardArgKind::ShiftLow: case NonStandardArgKind::ShiftHigh: case NonStandardArgKind::FixedRetBuffer: case NonStandardArgKind::ValidateIndirectCallTarget: return false; case NonStandardArgKind::WrapperDelegateCell: case NonStandardArgKind::VirtualStubCell: case NonStandardArgKind::PInvokeCookie: case NonStandardArgKind::PInvokeTarget: case NonStandardArgKind::R2RIndirectionCell: return true; default: unreached(); } } bool isLateArg() const { bool isLate = (_lateArgInx != UINT_MAX); return isLate; } unsigned GetLateArgInx() const { assert(isLateArg()); return _lateArgInx; } void SetLateArgInx(unsigned inx) { _lateArgInx = inx; } regNumber GetRegNum() const { return (regNumber)regNums[0]; } regNumber GetOtherRegNum() const { return (regNumber)regNums[1]; } #if defined(UNIX_AMD64_ABI) SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; #endif void setRegNum(unsigned int i, regNumber regNum) { assert(i < MAX_ARG_REG_COUNT); regNums[i] = (regNumberSmall)regNum; } regNumber GetRegNum(unsigned int i) { assert(i < MAX_ARG_REG_COUNT); return (regNumber)regNums[i]; } bool IsSplit() const { #if FEATURE_ARG_SPLIT return compFeatureArgSplit() && _isSplit; #else // FEATURE_ARG_SPLIT return false; #endif } void SetSplit(bool value) { #if FEATURE_ARG_SPLIT _isSplit = value; #endif } bool IsVararg() const { return compFeatureVarArg() && _isVararg; } void SetIsVararg(bool value) { if (compFeatureVarArg()) { _isVararg = value; } } bool IsHfaArg() const { if (GlobalJitOptions::compFeatureHfa) { return IsHfa(GetHfaElemKind()); } else { return false; } } bool IsHfaRegArg() const { if (GlobalJitOptions::compFeatureHfa) { return IsHfa(GetHfaElemKind()) && isPassedInRegisters(); } else { return false; } } unsigned intRegCount() const { #if defined(UNIX_AMD64_ABI) if (this->isStruct) { return this->structIntRegs; } #endif // defined(UNIX_AMD64_ABI) if (!this->isPassedInFloatRegisters()) { return this->numRegs; } return 0; } unsigned floatRegCount() const { #if defined(UNIX_AMD64_ABI) if (this->isStruct) { return this->structFloatRegs; } #endif // defined(UNIX_AMD64_ABI) if (this->isPassedInFloatRegisters()) { return this->numRegs; } return 0; } // Get the number of bytes that this argument is occupying on the stack, // including padding up to the target pointer size for platforms // where a stack argument can't take less. unsigned GetStackByteSize() const { if (!IsSplit() && numRegs > 0) { return 0; } assert(!IsHfaArg() || !IsSplit()); assert(GetByteSize() > TARGET_POINTER_SIZE * numRegs); const unsigned stackByteSize = GetByteSize() - TARGET_POINTER_SIZE * numRegs; return stackByteSize; } var_types GetHfaType() const { if (GlobalJitOptions::compFeatureHfa) { return HfaTypeFromElemKind(GetHfaElemKind()); } else { return TYP_UNDEF; } } void SetHfaType(var_types type, unsigned hfaSlots) { if (GlobalJitOptions::compFeatureHfa) { if (type != TYP_UNDEF) { // We must already have set the passing mode. assert(numRegs != 0 || GetStackByteSize() != 0); // We originally set numRegs according to the size of the struct, but if the size of the // hfaType is not the same as the pointer size, we need to correct it. // Note that hfaSlots is the number of registers we will use. For ARM, that is twice // the number of "double registers". 
unsigned numHfaRegs = hfaSlots; #ifdef TARGET_ARM if (type == TYP_DOUBLE) { // Must be an even number of registers. assert((numRegs & 1) == 0); numHfaRegs = hfaSlots / 2; } #endif // TARGET_ARM if (!IsHfaArg()) { // We haven't previously set this; do so now. CorInfoHFAElemType elemKind = HfaElemKindFromType(type); SetHfaElemKind(elemKind); // Ensure we've allocated enough bits. assert(GetHfaElemKind() == elemKind); if (isPassedInRegisters()) { numRegs = numHfaRegs; } } else { // We've already set this; ensure that it's consistent. if (isPassedInRegisters()) { assert(numRegs == numHfaRegs); } assert(type == HfaTypeFromElemKind(GetHfaElemKind())); } } } } #ifdef TARGET_ARM void SetIsBackFilled(bool backFilled) { isBackFilled = backFilled; } bool IsBackFilled() const { return isBackFilled; } #else // !TARGET_ARM void SetIsBackFilled(bool backFilled) { } bool IsBackFilled() const { return false; } #endif // !TARGET_ARM bool isPassedInRegisters() const { return !IsSplit() && (numRegs != 0); } bool isPassedInFloatRegisters() const { #ifdef TARGET_X86 return false; #else return isValidFloatArgReg(GetRegNum()); #endif } // Can we replace the struct type of this node with a primitive type for argument passing? bool TryPassAsPrimitive() const { return !IsSplit() && ((numRegs == 1) || (m_byteSize <= TARGET_POINTER_SIZE)); } #if defined(DEBUG_ARG_SLOTS) // Returns the number of "slots" used, where for this purpose a // register counts as a slot. unsigned getSlotCount() const { if (isBackFilled) { assert(isPassedInRegisters()); assert(numRegs == 1); } else if (GetRegNum() == REG_STK) { assert(!isPassedInRegisters()); assert(numRegs == 0); } else { assert(numRegs > 0); } return numSlots + numRegs; } #endif #if defined(DEBUG_ARG_SLOTS) // Returns the size as a multiple of pointer-size. // For targets without HFAs, this is the same as getSlotCount(). unsigned getSize() const { unsigned size = getSlotCount(); if (GlobalJitOptions::compFeatureHfa) { if (IsHfaRegArg()) { #ifdef TARGET_ARM // We counted the number of regs, but if they are DOUBLE hfa regs we have to double the size. if (GetHfaType() == TYP_DOUBLE) { assert(!IsSplit()); size <<= 1; } #elif defined(TARGET_ARM64) // We counted the number of regs, but if they are FLOAT hfa regs we have to halve the size, // or if they are SIMD16 vector hfa regs we have to double the size. if (GetHfaType() == TYP_FLOAT) { // Round up in case of odd HFA count. size = (size + 1) >> 1; } #ifdef FEATURE_SIMD else if (GetHfaType() == TYP_SIMD16) { size <<= 1; } #endif // FEATURE_SIMD #endif // TARGET_ARM64 } } return size; } #endif // DEBUG_ARG_SLOTS private: unsigned m_byteOffset; // byte size that this argument takes including the padding after. // For example, 1-byte arg on x64 with 8-byte alignment // will have `m_byteSize == 8`, the same arg on apple arm64 will have `m_byteSize == 1`. unsigned m_byteSize; unsigned m_byteAlignment; // usually 4 or 8 bytes (slots/registers). public: void SetByteOffset(unsigned byteOffset) { DEBUG_ARG_SLOTS_ASSERT(byteOffset / TARGET_POINTER_SIZE == slotNum); m_byteOffset = byteOffset; } unsigned GetByteOffset() const { DEBUG_ARG_SLOTS_ASSERT(m_byteOffset / TARGET_POINTER_SIZE == slotNum); return m_byteOffset; } void SetByteSize(unsigned byteSize, bool isStruct, bool isFloatHfa) { unsigned roundedByteSize; if (compMacOsArm64Abi()) { // Only struct types need extension or rounding to pointer size, but HFA<float> does not. 
if (isStruct && !isFloatHfa) { roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE); } else { roundedByteSize = byteSize; } } else { roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE); } #if !defined(TARGET_ARM) // Arm32 could have a struct with 8 byte alignment // which rounded size % 8 is not 0. assert(m_byteAlignment != 0); assert(roundedByteSize % m_byteAlignment == 0); #endif // TARGET_ARM #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi() && !isStruct) { assert(roundedByteSize == getSlotCount() * TARGET_POINTER_SIZE); } #endif m_byteSize = roundedByteSize; } unsigned GetByteSize() const { return m_byteSize; } void SetByteAlignment(unsigned byteAlignment) { m_byteAlignment = byteAlignment; } unsigned GetByteAlignment() const { return m_byteAlignment; } // Set the register numbers for a multireg argument. // There's nothing to do on x64/Ux because the structDesc has already been used to set the // register numbers. void SetMultiRegNums() { #if FEATURE_MULTIREG_ARGS && !defined(UNIX_AMD64_ABI) if (numRegs == 1) { return; } regNumber argReg = GetRegNum(0); #ifdef TARGET_ARM unsigned int regSize = (GetHfaType() == TYP_DOUBLE) ? 2 : 1; #else unsigned int regSize = 1; #endif if (numRegs > MAX_ARG_REG_COUNT) NO_WAY("Multireg argument exceeds the maximum length"); for (unsigned int regIndex = 1; regIndex < numRegs; regIndex++) { argReg = (regNumber)(argReg + regSize); setRegNum(regIndex, argReg); } #endif // FEATURE_MULTIREG_ARGS && !defined(UNIX_AMD64_ABI) } #ifdef DEBUG // Check that the value of 'isStruct' is consistent. // A struct arg must be one of the following: // - A node of struct type, // - A GT_FIELD_LIST, or // - A node of a scalar type, passed in a single register or slot // (or two slots in the case of a struct pass on the stack as TYP_DOUBLE). // void checkIsStruct() const { GenTree* node = GetNode(); if (isStruct) { if (!varTypeIsStruct(node) && !node->OperIs(GT_FIELD_LIST)) { // This is the case where we are passing a struct as a primitive type. // On most targets, this is always a single register or slot. // However, on ARM this could be two slots if it is TYP_DOUBLE. bool isPassedAsPrimitiveType = ((numRegs == 1) || ((numRegs == 0) && (GetByteSize() <= TARGET_POINTER_SIZE))); #ifdef TARGET_ARM if (!isPassedAsPrimitiveType) { if (node->TypeGet() == TYP_DOUBLE && numRegs == 0 && (numSlots == 2)) { isPassedAsPrimitiveType = true; } } #endif // TARGET_ARM assert(isPassedAsPrimitiveType); } } else { assert(!varTypeIsStruct(node)); } } void Dump() const; #endif }; //------------------------------------------------------------------------- // // The class fgArgInfo is used to handle the arguments // when morphing a GT_CALL node. // class fgArgInfo { Compiler* compiler; // Back pointer to the compiler instance so that we can allocate memory GenTreeCall* callTree; // Back pointer to the GT_CALL node for this fgArgInfo unsigned argCount; // Updatable arg count value #if defined(DEBUG_ARG_SLOTS) unsigned nextSlotNum; // Updatable slot count value #endif unsigned nextStackByteOffset; unsigned stkLevel; // Stack depth when we make this call (for x86) #if defined(UNIX_X86_ABI) bool alignmentDone; // Updateable flag, set to 'true' after we've done any required alignment. unsigned stkSizeBytes; // Size of stack used by this call, in bytes. Calculated during fgMorphArgs(). unsigned padStkAlign; // Stack alignment in bytes required before arguments are pushed for this call. 
// Computed dynamically during codegen, based on stkSizeBytes and the current // stack level (genStackLevel) when the first stack adjustment is made for // this call. #endif #if FEATURE_FIXED_OUT_ARGS unsigned outArgSize; // Size of the out arg area for the call, will be at least MIN_ARG_AREA_FOR_CALL #endif unsigned argTableSize; // size of argTable array (equal to the argCount when done with fgMorphArgs) bool hasRegArgs; // true if we have one or more register arguments bool hasStackArgs; // true if we have one or more stack arguments bool argsComplete; // marker for state bool argsSorted; // marker for state bool needsTemps; // one or more arguments must be copied to a temp by EvalArgsToTemps fgArgTabEntry** argTable; // variable sized array of per argument description: (i.e. argTable[argTableSize]) private: void AddArg(fgArgTabEntry* curArgTabEntry); public: fgArgInfo(Compiler* comp, GenTreeCall* call, unsigned argCount); fgArgInfo(GenTreeCall* newCall, GenTreeCall* oldCall); fgArgTabEntry* AddRegArg(unsigned argNum, GenTree* node, GenTreeCall::Use* use, regNumber regNum, unsigned numRegs, unsigned byteSize, unsigned byteAlignment, bool isStruct, bool isFloatHfa, bool isVararg = false); #ifdef UNIX_AMD64_ABI fgArgTabEntry* AddRegArg(unsigned argNum, GenTree* node, GenTreeCall::Use* use, regNumber regNum, unsigned numRegs, unsigned byteSize, unsigned byteAlignment, const bool isStruct, const bool isFloatHfa, const bool isVararg, const regNumber otherRegNum, const unsigned structIntRegs, const unsigned structFloatRegs, const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr = nullptr); #endif // UNIX_AMD64_ABI fgArgTabEntry* AddStkArg(unsigned argNum, GenTree* node, GenTreeCall::Use* use, unsigned numSlots, unsigned byteSize, unsigned byteAlignment, bool isStruct, bool isFloatHfa, bool isVararg = false); void RemorphReset(); void UpdateRegArg(fgArgTabEntry* argEntry, GenTree* node, bool reMorphing); void UpdateStkArg(fgArgTabEntry* argEntry, GenTree* node, bool reMorphing); void SplitArg(unsigned argNum, unsigned numRegs, unsigned numSlots); void EvalToTmp(fgArgTabEntry* curArgTabEntry, unsigned tmpNum, GenTree* newNode); void ArgsComplete(); void SortArgs(); void EvalArgsToTemps(); unsigned ArgCount() const { return argCount; } fgArgTabEntry** ArgTable() const { return argTable; } #if defined(DEBUG_ARG_SLOTS) unsigned GetNextSlotNum() const { return nextSlotNum; } #endif unsigned GetNextSlotByteOffset() const { return nextStackByteOffset; } bool HasRegArgs() const { return hasRegArgs; } bool NeedsTemps() const { return needsTemps; } bool HasStackArgs() const { return hasStackArgs; } bool AreArgsComplete() const { return argsComplete; } #if FEATURE_FIXED_OUT_ARGS unsigned GetOutArgSize() const { return outArgSize; } void SetOutArgSize(unsigned newVal) { outArgSize = newVal; } #endif // FEATURE_FIXED_OUT_ARGS #if defined(UNIX_X86_ABI) void ComputeStackAlignment(unsigned curStackLevelInBytes) { padStkAlign = AlignmentPad(curStackLevelInBytes, STACK_ALIGN); } unsigned GetStkAlign() const { return padStkAlign; } void SetStkSizeBytes(unsigned newStkSizeBytes) { stkSizeBytes = newStkSizeBytes; } unsigned GetStkSizeBytes() const { return stkSizeBytes; } bool IsStkAlignmentDone() const { return alignmentDone; } void SetStkAlignmentDone() { alignmentDone = true; } #endif // defined(UNIX_X86_ABI) // Get the fgArgTabEntry for the arg at position argNum.
fgArgTabEntry* GetArgEntry(unsigned argNum, bool reMorphing = true) const { fgArgTabEntry* curArgTabEntry = nullptr; if (!reMorphing) { // The arg table has not yet been sorted. curArgTabEntry = argTable[argNum]; assert(curArgTabEntry->argNum == argNum); return curArgTabEntry; } for (unsigned i = 0; i < argCount; i++) { curArgTabEntry = argTable[i]; if (curArgTabEntry->argNum == argNum) { return curArgTabEntry; } } noway_assert(!"GetArgEntry: argNum not found"); return nullptr; } void SetNeedsTemps() { needsTemps = true; } // Get the node for the arg at position argIndex. // Caller must ensure that this index is a valid arg index. GenTree* GetArgNode(unsigned argIndex) const { return GetArgEntry(argIndex)->GetNode(); } void Dump(Compiler* compiler) const; }; #ifdef DEBUG // XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // We have the ability to mark source expressions with "Test Labels." // These drive assertions within the JIT, or internal JIT testing. For example, we could label expressions // that should be CSE defs, and other expressions that should uses of those defs, with a shared label. enum TestLabel // This must be kept identical to System.Runtime.CompilerServices.JitTestLabel.TestLabel. { TL_SsaName, TL_VN, // Defines a "VN equivalence class". (For full VN, including exceptions thrown). TL_VNNorm, // Like above, but uses the non-exceptional value of the expression. TL_CSE_Def, // This must be identified in the JIT as a CSE def TL_CSE_Use, // This must be identified in the JIT as a CSE use TL_LoopHoist, // Expression must (or must not) be hoisted out of the loop. }; struct TestLabelAndNum { TestLabel m_tl; ssize_t m_num; TestLabelAndNum() : m_tl(TestLabel(0)), m_num(0) { } }; typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, TestLabelAndNum> NodeToTestDataMap; // XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #endif // DEBUG //------------------------------------------------------------------------- // LoopFlags: flags for the loop table. // enum LoopFlags : unsigned short { LPFLG_EMPTY = 0, // LPFLG_UNUSED = 0x0001, // LPFLG_UNUSED = 0x0002, LPFLG_ITER = 0x0004, // loop of form: for (i = icon or expression; test_condition(); i++) // LPFLG_UNUSED = 0x0008, LPFLG_CONTAINS_CALL = 0x0010, // If executing the loop body *may* execute a call // LPFLG_UNUSED = 0x0020, LPFLG_CONST_INIT = 0x0040, // iterator is initialized with a constant (found in lpConstInit) LPFLG_SIMD_LIMIT = 0x0080, // iterator is compared with vector element count (found in lpConstLimit) LPFLG_VAR_LIMIT = 0x0100, // iterator is compared with a local var (var # found in lpVarLimit) LPFLG_CONST_LIMIT = 0x0200, // iterator is compared with a constant (found in lpConstLimit) LPFLG_ARRLEN_LIMIT = 0x0400, // iterator is compared with a.len or a[i].len (found in lpArrLenLimit) LPFLG_HAS_PREHEAD = 0x0800, // lpHead is known to be a preHead for this loop LPFLG_REMOVED = 0x1000, // has been removed from the loop table (unrolled or optimized away) LPFLG_DONT_UNROLL = 0x2000, // do not unroll this loop LPFLG_ASGVARS_YES = 0x4000, // "lpAsgVars" has been computed LPFLG_ASGVARS_INC = 0x8000, // "lpAsgVars" is incomplete -- vars beyond those representable in an AllVarSet // type are assigned to. 
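// Illustrative example of how these flags combine (hypothetical loop, not from any particular method): a simple counted loop such as 'for (i = 0; i < 10; i++)' would typically have LPFLG_ITER | LPFLG_CONST_INIT | LPFLG_CONST_LIMIT set, while a loop whose limit is an array length would use LPFLG_ARRLEN_LIMIT instead of LPFLG_CONST_LIMIT.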
}; inline constexpr LoopFlags operator~(LoopFlags a) { return (LoopFlags)(~(unsigned short)a); } inline constexpr LoopFlags operator|(LoopFlags a, LoopFlags b) { return (LoopFlags)((unsigned short)a | (unsigned short)b); } inline constexpr LoopFlags operator&(LoopFlags a, LoopFlags b) { return (LoopFlags)((unsigned short)a & (unsigned short)b); } inline LoopFlags& operator|=(LoopFlags& a, LoopFlags b) { return a = (LoopFlags)((unsigned short)a | (unsigned short)b); } inline LoopFlags& operator&=(LoopFlags& a, LoopFlags b) { return a = (LoopFlags)((unsigned short)a & (unsigned short)b); } // The following holds information about instr offsets in terms of generated code. enum class IPmappingDscKind { Prolog, // The mapping represents the start of a prolog. Epilog, // The mapping represents the start of an epilog. NoMapping, // This does not map to any IL offset. Normal, // The mapping maps to an IL offset. }; struct IPmappingDsc { emitLocation ipmdNativeLoc; // the emitter location of the native code corresponding to the IL offset IPmappingDscKind ipmdKind; // The kind of mapping ILLocation ipmdLoc; // The location for normal mappings bool ipmdIsLabel; // Can this code be a branch label? }; struct PreciseIPMapping { emitLocation nativeLoc; DebugInfo debugInfo; }; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX The big guy. The sections are currently organized as : XX XX XX XX o GenTree and BasicBlock XX XX o LclVarsInfo XX XX o Importer XX XX o FlowGraph XX XX o Optimizer XX XX o RegAlloc XX XX o EEInterface XX XX o TempsInfo XX XX o RegSet XX XX o GCInfo XX XX o Instruction XX XX o ScopeInfo XX XX o PrologScopeInfo XX XX o CodeGenerator XX XX o UnwindInfo XX XX o Compiler XX XX o typeInfo XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ struct HWIntrinsicInfo; class Compiler { friend class emitter; friend class UnwindInfo; friend class UnwindFragmentInfo; friend class UnwindEpilogInfo; friend class JitTimer; friend class LinearScan; friend class fgArgInfo; friend class Rationalizer; friend class Phase; friend class Lowering; friend class CSE_DataFlow; friend class CSE_Heuristic; friend class CodeGenInterface; friend class CodeGen; friend class LclVarDsc; friend class TempDsc; friend class LIR; friend class ObjectAllocator; friend class LocalAddressVisitor; friend struct GenTree; friend class MorphInitBlockHelper; friend class MorphCopyBlockHelper; #ifdef FEATURE_HW_INTRINSICS friend struct HWIntrinsicInfo; #endif // FEATURE_HW_INTRINSICS #ifndef TARGET_64BIT friend class DecomposeLongs; #endif // !TARGET_64BIT /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Misc structs definitions XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: hashBvGlobalData hbvGlobalData; // Used by the hashBv bitvector package. #ifdef DEBUG bool verbose; bool verboseTrees; bool shouldUseVerboseTrees(); bool asciiTrees; // If true, dump trees using only ASCII characters bool shouldDumpASCIITrees(); bool verboseSsa; // If true, produce especially verbose dump output in SSA construction. 
    bool shouldUseVerboseSsa();
    bool treesBeforeAfterMorph; // If true, print trees before/after morphing (paired by an intra-compilation id: morphNum)
    int  morphNum;              // This counts the trees that have been morphed, allowing us to label each uniquely.
    bool doExtraSuperPmiQueries;
    void makeExtraStructQueries(CORINFO_CLASS_HANDLE structHandle, int level); // Make queries recursively 'level' deep.

    const char* VarNameToStr(VarName name)
    {
        return name;
    }

    DWORD expensiveDebugCheckLevel;
#endif

#if FEATURE_MULTIREG_RET
    GenTree* impAssignMultiRegTypeToVar(GenTree*             op,
                                        CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv));
#endif // FEATURE_MULTIREG_RET

#ifdef TARGET_X86
    bool isTrivialPointerSizedStruct(CORINFO_CLASS_HANDLE clsHnd) const;
#endif // TARGET_X86

    //-------------------------------------------------------------------------
    // Functions to handle homogeneous floating-point aggregates (HFAs) in ARM/ARM64.
    // HFAs are one to four element structs where each element is the same
    // type, either all float or all double. We handle HVAs (one to four elements of
    // vector types) uniformly with HFAs. HFAs are treated specially
    // in the ARM/ARM64 Procedure Call Standards, specifically, they are passed in
    // floating-point registers instead of the general purpose registers.
    //
    bool IsHfa(CORINFO_CLASS_HANDLE hClass);
    bool IsHfa(GenTree* tree);

    var_types GetHfaType(GenTree* tree);
    unsigned GetHfaCount(GenTree* tree);

    var_types GetHfaType(CORINFO_CLASS_HANDLE hClass);
    unsigned GetHfaCount(CORINFO_CLASS_HANDLE hClass);

    bool IsMultiRegReturnedType(CORINFO_CLASS_HANDLE hClass, CorInfoCallConvExtension callConv);

    //-------------------------------------------------------------------------
    // The following is used for validating the format of the EH table
    //
    struct EHNodeDsc;
    typedef struct EHNodeDsc* pEHNodeDsc;

    EHNodeDsc* ehnTree; // root of the tree comprising the EHnodes.
    EHNodeDsc* ehnNext; // next unused EHNodeDsc in the block preallocated by verInitEHTree.

    struct EHNodeDsc
    {
        enum EHBlockType
        {
            TryNode,
            FilterNode,
            HandlerNode,
            FinallyNode,
            FaultNode
        };

        EHBlockType ehnBlockType;   // kind of EH block
        IL_OFFSET   ehnStartOffset; // IL offset of start of the EH block
        IL_OFFSET ehnEndOffset; // IL offset past end of the EH block. (TODO: looks like verInsertEhNode() sets this to
                                // the last IL offset, not "one past the last one", i.e., the range Start to End is
                                // inclusive).
pEHNodeDsc ehnNext; // next (non-nested) block in sequential order pEHNodeDsc ehnChild; // leftmost nested block union { pEHNodeDsc ehnTryNode; // for filters and handlers, the corresponding try node pEHNodeDsc ehnHandlerNode; // for a try node, the corresponding handler node }; pEHNodeDsc ehnFilterNode; // if this is a try node and has a filter, otherwise 0 pEHNodeDsc ehnEquivalent; // if blockType=tryNode, start offset and end offset is same, void ehnSetTryNodeType() { ehnBlockType = TryNode; } void ehnSetFilterNodeType() { ehnBlockType = FilterNode; } void ehnSetHandlerNodeType() { ehnBlockType = HandlerNode; } void ehnSetFinallyNodeType() { ehnBlockType = FinallyNode; } void ehnSetFaultNodeType() { ehnBlockType = FaultNode; } bool ehnIsTryBlock() { return ehnBlockType == TryNode; } bool ehnIsFilterBlock() { return ehnBlockType == FilterNode; } bool ehnIsHandlerBlock() { return ehnBlockType == HandlerNode; } bool ehnIsFinallyBlock() { return ehnBlockType == FinallyNode; } bool ehnIsFaultBlock() { return ehnBlockType == FaultNode; } // returns true if there is any overlap between the two nodes static bool ehnIsOverlap(pEHNodeDsc node1, pEHNodeDsc node2) { if (node1->ehnStartOffset < node2->ehnStartOffset) { return (node1->ehnEndOffset >= node2->ehnStartOffset); } else { return (node1->ehnStartOffset <= node2->ehnEndOffset); } } // fails with BADCODE if inner is not completely nested inside outer static bool ehnIsNested(pEHNodeDsc inner, pEHNodeDsc outer) { return ((inner->ehnStartOffset >= outer->ehnStartOffset) && (inner->ehnEndOffset <= outer->ehnEndOffset)); } }; //------------------------------------------------------------------------- // Exception handling functions // #if !defined(FEATURE_EH_FUNCLETS) bool ehNeedsShadowSPslots() { return (info.compXcptnsCount || opts.compDbgEnC); } // 0 for methods with no EH // 1 for methods with non-nested EH, or where only the try blocks are nested // 2 for a method with a catch within a catch // etc. unsigned ehMaxHndNestingCount; #endif // !FEATURE_EH_FUNCLETS static bool jitIsBetween(unsigned value, unsigned start, unsigned end); static bool jitIsBetweenInclusive(unsigned value, unsigned start, unsigned end); bool bbInCatchHandlerILRange(BasicBlock* blk); bool bbInFilterILRange(BasicBlock* blk); bool bbInTryRegions(unsigned regionIndex, BasicBlock* blk); bool bbInExnFlowRegions(unsigned regionIndex, BasicBlock* blk); bool bbInHandlerRegions(unsigned regionIndex, BasicBlock* blk); bool bbInCatchHandlerRegions(BasicBlock* tryBlk, BasicBlock* hndBlk); unsigned short bbFindInnermostCommonTryRegion(BasicBlock* bbOne, BasicBlock* bbTwo); unsigned short bbFindInnermostTryRegionContainingHandlerRegion(unsigned handlerIndex); unsigned short bbFindInnermostHandlerRegionContainingTryRegion(unsigned tryIndex); // Returns true if "block" is the start of a try region. bool bbIsTryBeg(BasicBlock* block); // Returns true if "block" is the start of a handler or filter region. bool bbIsHandlerBeg(BasicBlock* block); // Returns true iff "block" is where control flows if an exception is raised in the // try region, and sets "*regionIndex" to the index of the try for the handler. // Differs from "IsHandlerBeg" in the case of filters, where this is true for the first // block of the filter, but not for the filter's handler. bool bbIsExFlowBlock(BasicBlock* block, unsigned* regionIndex); bool ehHasCallableHandlers(); // Return the EH descriptor for the given region index. EHblkDsc* ehGetDsc(unsigned regionIndex); // Return the EH index given a region descriptor. 
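    // (This is the inverse of ehGetDsc; the descriptor must point into this compiler's EH table.)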
unsigned ehGetIndex(EHblkDsc* ehDsc); // Return the EH descriptor index of the enclosing try, for the given region index. unsigned ehGetEnclosingTryIndex(unsigned regionIndex); // Return the EH descriptor index of the enclosing handler, for the given region index. unsigned ehGetEnclosingHndIndex(unsigned regionIndex); // Return the EH descriptor for the most nested 'try' region this BasicBlock is a member of (or nullptr if this // block is not in a 'try' region). EHblkDsc* ehGetBlockTryDsc(BasicBlock* block); // Return the EH descriptor for the most nested filter or handler region this BasicBlock is a member of (or nullptr // if this block is not in a filter or handler region). EHblkDsc* ehGetBlockHndDsc(BasicBlock* block); // Return the EH descriptor for the most nested region that may handle exceptions raised in this BasicBlock (or // nullptr if this block's exceptions propagate to caller). EHblkDsc* ehGetBlockExnFlowDsc(BasicBlock* block); EHblkDsc* ehIsBlockTryLast(BasicBlock* block); EHblkDsc* ehIsBlockHndLast(BasicBlock* block); bool ehIsBlockEHLast(BasicBlock* block); bool ehBlockHasExnFlowDsc(BasicBlock* block); // Return the region index of the most nested EH region this block is in. unsigned ehGetMostNestedRegionIndex(BasicBlock* block, bool* inTryRegion); // Find the true enclosing try index, ignoring 'mutual protect' try. Uses IL ranges to check. unsigned ehTrueEnclosingTryIndexIL(unsigned regionIndex); // Return the index of the most nested enclosing region for a particular EH region. Returns NO_ENCLOSING_INDEX // if there is no enclosing region. If the returned index is not NO_ENCLOSING_INDEX, then '*inTryRegion' // is set to 'true' if the enclosing region is a 'try', or 'false' if the enclosing region is a handler. // (It can never be a filter.) unsigned ehGetEnclosingRegionIndex(unsigned regionIndex, bool* inTryRegion); // A block has been deleted. Update the EH table appropriately. void ehUpdateForDeletedBlock(BasicBlock* block); // Determine whether a block can be deleted while preserving the EH normalization rules. bool ehCanDeleteEmptyBlock(BasicBlock* block); // Update the 'last' pointers in the EH table to reflect new or deleted blocks in an EH region. void ehUpdateLastBlocks(BasicBlock* oldLast, BasicBlock* newLast); // For a finally handler, find the region index that the BBJ_CALLFINALLY lives in that calls the handler, // or NO_ENCLOSING_INDEX if the BBJ_CALLFINALLY lives in the main function body. Normally, the index // is the same index as the handler (and the BBJ_CALLFINALLY lives in the 'try' region), but for AMD64 the // BBJ_CALLFINALLY lives in the enclosing try or handler region, whichever is more nested, or the main function // body. If the returned index is not NO_ENCLOSING_INDEX, then '*inTryRegion' is set to 'true' if the // BBJ_CALLFINALLY lives in the returned index's 'try' region, or 'false' if lives in the handler region. (It never // lives in a filter.) unsigned ehGetCallFinallyRegionIndex(unsigned finallyIndex, bool* inTryRegion); // Find the range of basic blocks in which all BBJ_CALLFINALLY will be found that target the 'finallyIndex' region's // handler. Set begBlk to the first block, and endBlk to the block after the last block of the range // (nullptr if the last block is the last block in the program). // Precondition: 'finallyIndex' is the EH region of a try/finally clause. 
void ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** begBlk, BasicBlock** endBlk); #ifdef DEBUG // Given a BBJ_CALLFINALLY block and the EH region index of the finally it is calling, return // 'true' if the BBJ_CALLFINALLY is in the correct EH region. bool ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsigned finallyIndex); #endif // DEBUG #if defined(FEATURE_EH_FUNCLETS) // Do we need a PSPSym in the main function? For codegen purposes, we only need one // if there is a filter that protects a region with a nested EH clause (such as a // try/catch nested in the 'try' body of a try/filter/filter-handler). See // genFuncletProlog() for more details. However, the VM seems to use it for more // purposes, maybe including debugging. Until we are sure otherwise, always create // a PSPSym for functions with any EH. bool ehNeedsPSPSym() const { #ifdef TARGET_X86 return false; #else // TARGET_X86 return compHndBBtabCount > 0; #endif // TARGET_X86 } bool ehAnyFunclets(); // Are there any funclets in this function? unsigned ehFuncletCount(); // Return the count of funclets in the function unsigned bbThrowIndex(BasicBlock* blk); // Get the index to use as the cache key for sharing throw blocks #else // !FEATURE_EH_FUNCLETS bool ehAnyFunclets() { return false; } unsigned ehFuncletCount() { return 0; } unsigned bbThrowIndex(BasicBlock* blk) { return blk->bbTryIndex; } // Get the index to use as the cache key for sharing throw blocks #endif // !FEATURE_EH_FUNCLETS // Returns a flowList representing the "EH predecessors" of "blk". These are the normal predecessors of // "blk", plus one special case: if "blk" is the first block of a handler, considers the predecessor(s) of the first // first block of the corresponding try region to be "EH predecessors". (If there is a single such predecessor, // for example, we want to consider that the immediate dominator of the catch clause start block, so it's // convenient to also consider it a predecessor.) flowList* BlockPredsWithEH(BasicBlock* blk); // This table is useful for memoization of the method above. typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, flowList*> BlockToFlowListMap; BlockToFlowListMap* m_blockToEHPreds; BlockToFlowListMap* GetBlockToEHPreds() { if (m_blockToEHPreds == nullptr) { m_blockToEHPreds = new (getAllocator()) BlockToFlowListMap(getAllocator()); } return m_blockToEHPreds; } void* ehEmitCookie(BasicBlock* block); UNATIVE_OFFSET ehCodeOffset(BasicBlock* block); EHblkDsc* ehInitHndRange(BasicBlock* src, IL_OFFSET* hndBeg, IL_OFFSET* hndEnd, bool* inFilter); EHblkDsc* ehInitTryRange(BasicBlock* src, IL_OFFSET* tryBeg, IL_OFFSET* tryEnd); EHblkDsc* ehInitHndBlockRange(BasicBlock* blk, BasicBlock** hndBeg, BasicBlock** hndLast, bool* inFilter); EHblkDsc* ehInitTryBlockRange(BasicBlock* blk, BasicBlock** tryBeg, BasicBlock** tryLast); void fgSetTryBeg(EHblkDsc* handlerTab, BasicBlock* newTryBeg); void fgSetTryEnd(EHblkDsc* handlerTab, BasicBlock* newTryLast); void fgSetHndEnd(EHblkDsc* handlerTab, BasicBlock* newHndLast); void fgSkipRmvdBlocks(EHblkDsc* handlerTab); void fgAllocEHTable(); void fgRemoveEHTableEntry(unsigned XTnum); #if defined(FEATURE_EH_FUNCLETS) EHblkDsc* fgAddEHTableEntry(unsigned XTnum); #endif // FEATURE_EH_FUNCLETS #if !FEATURE_EH void fgRemoveEH(); #endif // !FEATURE_EH void fgSortEHTable(); // Causes the EH table to obey some well-formedness conditions, by inserting // empty BB's when necessary: // * No block is both the first block of a handler and the first block of a try. 
// * No block is the first block of multiple 'try' regions. // * No block is the last block of multiple EH regions. void fgNormalizeEH(); bool fgNormalizeEHCase1(); bool fgNormalizeEHCase2(); bool fgNormalizeEHCase3(); void fgCheckForLoopsInHandlers(); #ifdef DEBUG void dispIncomingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause); void dispOutgoingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause); void fgVerifyHandlerTab(); void fgDispHandlerTab(); #endif // DEBUG bool fgNeedToSortEHTable; void verInitEHTree(unsigned numEHClauses); void verInsertEhNode(CORINFO_EH_CLAUSE* clause, EHblkDsc* handlerTab); void verInsertEhNodeInTree(EHNodeDsc** ppRoot, EHNodeDsc* node); void verInsertEhNodeParent(EHNodeDsc** ppRoot, EHNodeDsc* node); void verCheckNestingLevel(EHNodeDsc* initRoot); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX GenTree and BasicBlock XX XX XX XX Functions to allocate and display the GenTrees and BasicBlocks XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ // Functions to create nodes Statement* gtNewStmt(GenTree* expr = nullptr); Statement* gtNewStmt(GenTree* expr, const DebugInfo& di); // For unary opers. GenTree* gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, bool doSimplifications = TRUE); // For binary opers. GenTree* gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2); GenTreeColon* gtNewColonNode(var_types type, GenTree* elseNode, GenTree* thenNode); GenTreeQmark* gtNewQmarkNode(var_types type, GenTree* cond, GenTreeColon* colon); GenTree* gtNewLargeOperNode(genTreeOps oper, var_types type = TYP_I_IMPL, GenTree* op1 = nullptr, GenTree* op2 = nullptr); GenTreeIntCon* gtNewIconNode(ssize_t value, var_types type = TYP_INT); GenTreeIntCon* gtNewIconNode(unsigned fieldOffset, FieldSeqNode* fieldSeq); GenTreeIntCon* gtNewNull(); GenTreeIntCon* gtNewTrue(); GenTreeIntCon* gtNewFalse(); GenTree* gtNewPhysRegNode(regNumber reg, var_types type); GenTree* gtNewJmpTableNode(); GenTree* gtNewIndOfIconHandleNode(var_types indType, size_t value, GenTreeFlags iconFlags, bool isInvariant); GenTree* gtNewIconHandleNode(size_t value, GenTreeFlags flags, FieldSeqNode* fields = nullptr); GenTreeFlags gtTokenToIconFlags(unsigned token); GenTree* gtNewIconEmbHndNode(void* value, void* pValue, GenTreeFlags flags, void* compileTimeHandle); GenTree* gtNewIconEmbScpHndNode(CORINFO_MODULE_HANDLE scpHnd); GenTree* gtNewIconEmbClsHndNode(CORINFO_CLASS_HANDLE clsHnd); GenTree* gtNewIconEmbMethHndNode(CORINFO_METHOD_HANDLE methHnd); GenTree* gtNewIconEmbFldHndNode(CORINFO_FIELD_HANDLE fldHnd); GenTree* gtNewStringLiteralNode(InfoAccessType iat, void* pValue); GenTreeIntCon* gtNewStringLiteralLength(GenTreeStrCon* node); GenTree* gtNewLconNode(__int64 value); GenTree* gtNewDconNode(double value, var_types type = TYP_DOUBLE); GenTree* gtNewSconNode(int CPX, CORINFO_MODULE_HANDLE scpHandle); GenTree* gtNewZeroConNode(var_types type); GenTree* gtNewOneConNode(var_types type); GenTreeLclVar* gtNewStoreLclVar(unsigned dstLclNum, GenTree* src); #ifdef FEATURE_SIMD GenTree* gtNewSIMDVectorZero(var_types simdType, CorInfoType simdBaseJitType, unsigned simdSize); #endif GenTree* gtNewBlkOpNode(GenTree* dst, GenTree* srcOrFillVal, bool isVolatile, bool isCopyBlock); GenTree* gtNewPutArgReg(var_types type, GenTree* arg, 
regNumber argReg); GenTree* gtNewBitCastNode(var_types type, GenTree* arg); protected: void gtBlockOpInit(GenTree* result, GenTree* dst, GenTree* srcOrFillVal, bool isVolatile); public: GenTreeObj* gtNewObjNode(CORINFO_CLASS_HANDLE structHnd, GenTree* addr); void gtSetObjGcInfo(GenTreeObj* objNode); GenTree* gtNewStructVal(CORINFO_CLASS_HANDLE structHnd, GenTree* addr); GenTree* gtNewBlockVal(GenTree* addr, unsigned size); GenTree* gtNewCpObjNode(GenTree* dst, GenTree* src, CORINFO_CLASS_HANDLE structHnd, bool isVolatile); GenTreeCall::Use* gtNewCallArgs(GenTree* node); GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2); GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3); GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3, GenTree* node4); GenTreeCall::Use* gtPrependNewCallArg(GenTree* node, GenTreeCall::Use* args); GenTreeCall::Use* gtInsertNewCallArgAfter(GenTree* node, GenTreeCall::Use* after); GenTreeCall* gtNewCallNode(gtCallTypes callType, CORINFO_METHOD_HANDLE handle, var_types type, GenTreeCall::Use* args, const DebugInfo& di = DebugInfo()); GenTreeCall* gtNewIndCallNode(GenTree* addr, var_types type, GenTreeCall::Use* args, const DebugInfo& di = DebugInfo()); GenTreeCall* gtNewHelperCallNode(unsigned helper, var_types type, GenTreeCall::Use* args = nullptr); GenTreeCall* gtNewRuntimeLookupHelperCallNode(CORINFO_RUNTIME_LOOKUP* pRuntimeLookup, GenTree* ctxTree, void* compileTimeHandle); GenTreeLclVar* gtNewLclvNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs = BAD_IL_OFFSET)); GenTreeLclVar* gtNewLclLNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs = BAD_IL_OFFSET)); GenTreeLclVar* gtNewLclVarAddrNode(unsigned lclNum, var_types type = TYP_I_IMPL); GenTreeLclFld* gtNewLclFldAddrNode(unsigned lclNum, unsigned lclOffs, FieldSeqNode* fieldSeq, var_types type = TYP_I_IMPL); #ifdef FEATURE_SIMD GenTreeSIMD* gtNewSIMDNode( var_types type, GenTree* op1, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize); GenTreeSIMD* gtNewSIMDNode(var_types type, GenTree* op1, GenTree* op2, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize); void SetOpLclRelatedToSIMDIntrinsic(GenTree* op); #endif #ifdef FEATURE_HW_INTRINSICS GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, GenTree* op4, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree** operands, size_t operandCount, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* 
gtNewSimdHWIntrinsicNode(var_types type, IntrinsicNodeBuilder&& nodeBuilder, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode( var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, op1, op2, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTree* gtNewSimdAbsNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdBinOpNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCeilNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCmpOpNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCmpOpAllNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCmpOpAnyNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCndSelNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCreateBroadcastNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdDotProdNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdFloorNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdGetElementNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdMaxNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdMinNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdNarrowNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdSqrtNode( 
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdSumNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdUnOpNode(genTreeOps op, var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdWidenLowerNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdWidenUpperNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdWithElementNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdZeroNode(var_types type, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode( var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID); CORINFO_CLASS_HANDLE gtGetStructHandleForHWSIMD(var_types simdType, CorInfoType simdBaseJitType); CorInfoType getBaseJitTypeFromArgIfNeeded(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType); #endif // FEATURE_HW_INTRINSICS GenTree* gtNewMustThrowException(unsigned helper, var_types type, CORINFO_CLASS_HANDLE clsHnd); GenTreeLclFld* gtNewLclFldNode(unsigned lnum, var_types type, unsigned offset); GenTree* gtNewInlineCandidateReturnExpr(GenTree* inlineCandidate, var_types type, BasicBlockFlags bbFlags); GenTreeField* gtNewFieldRef(var_types type, CORINFO_FIELD_HANDLE fldHnd, GenTree* obj = nullptr, DWORD offset = 0); GenTree* gtNewIndexRef(var_types typ, GenTree* arrayOp, GenTree* indexOp); GenTreeArrLen* gtNewArrLen(var_types typ, GenTree* arrayOp, int lenOffset, BasicBlock* block); GenTreeIndir* gtNewIndir(var_types typ, GenTree* addr); GenTree* gtNewNullCheck(GenTree* addr, BasicBlock* basicBlock); var_types gtTypeForNullCheck(GenTree* tree); void gtChangeOperToNullCheck(GenTree* tree, BasicBlock* block); static fgArgTabEntry* gtArgEntryByArgNum(GenTreeCall* call, unsigned argNum); static fgArgTabEntry* gtArgEntryByNode(GenTreeCall* call, GenTree* node); fgArgTabEntry* gtArgEntryByLateArgIndex(GenTreeCall* call, unsigned lateArgInx); static GenTree* gtArgNodeByLateArgInx(GenTreeCall* call, unsigned lateArgInx); GenTreeOp* gtNewAssignNode(GenTree* dst, GenTree* src); GenTree* gtNewTempAssign(unsigned tmp, GenTree* val, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); GenTree* gtNewRefCOMfield(GenTree* objPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS access, CORINFO_FIELD_INFO* pFieldInfo, var_types lclTyp, CORINFO_CLASS_HANDLE structType, GenTree* assg); GenTree* gtNewNothingNode(); GenTree* gtNewArgPlaceHolderNode(var_types type, CORINFO_CLASS_HANDLE clsHnd); GenTree* gtUnusedValNode(GenTree* expr); GenTree* gtNewKeepAliveNode(GenTree* op); GenTreeCast* gtNewCastNode(var_types typ, GenTree* op1, bool fromUnsigned, var_types castType); GenTreeCast* gtNewCastNodeL(var_types typ, 
GenTree* op1, bool fromUnsigned, var_types castType); GenTreeAllocObj* gtNewAllocObjNode( unsigned int helper, bool helperHasSideEffects, CORINFO_CLASS_HANDLE clsHnd, var_types type, GenTree* op1); GenTreeAllocObj* gtNewAllocObjNode(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool useParent); GenTree* gtNewRuntimeLookup(CORINFO_GENERIC_HANDLE hnd, CorInfoGenericHandleType hndTyp, GenTree* lookupTree); GenTreeIndir* gtNewMethodTableLookup(GenTree* obj); //------------------------------------------------------------------------ // Other GenTree functions GenTree* gtClone(GenTree* tree, bool complexOK = false); // If `tree` is a lclVar with lclNum `varNum`, return an IntCns with value `varVal`; otherwise, // create a copy of `tree`, adding specified flags, replacing uses of lclVar `deepVarNum` with // IntCnses with value `deepVarVal`. GenTree* gtCloneExpr( GenTree* tree, GenTreeFlags addFlags, unsigned varNum, int varVal, unsigned deepVarNum, int deepVarVal); // Create a copy of `tree`, optionally adding specifed flags, and optionally mapping uses of local // `varNum` to int constants with value `varVal`. GenTree* gtCloneExpr(GenTree* tree, GenTreeFlags addFlags = GTF_EMPTY, unsigned varNum = BAD_VAR_NUM, int varVal = 0) { return gtCloneExpr(tree, addFlags, varNum, varVal, varNum, varVal); } Statement* gtCloneStmt(Statement* stmt) { GenTree* exprClone = gtCloneExpr(stmt->GetRootNode()); return gtNewStmt(exprClone, stmt->GetDebugInfo()); } // Internal helper for cloning a call GenTreeCall* gtCloneExprCallHelper(GenTreeCall* call, GenTreeFlags addFlags = GTF_EMPTY, unsigned deepVarNum = BAD_VAR_NUM, int deepVarVal = 0); // Create copy of an inline or guarded devirtualization candidate tree. GenTreeCall* gtCloneCandidateCall(GenTreeCall* call); void gtUpdateSideEffects(Statement* stmt, GenTree* tree); void gtUpdateTreeAncestorsSideEffects(GenTree* tree); void gtUpdateStmtSideEffects(Statement* stmt); void gtUpdateNodeSideEffects(GenTree* tree); void gtUpdateNodeOperSideEffects(GenTree* tree); void gtUpdateNodeOperSideEffectsPost(GenTree* tree); // Returns "true" iff the complexity (not formally defined, but first interpretation // is #of nodes in subtree) of "tree" is greater than "limit". // (This is somewhat redundant with the "GetCostEx()/GetCostSz()" fields, but can be used // before they have been set.) bool gtComplexityExceeds(GenTree** tree, unsigned limit); GenTree* gtReverseCond(GenTree* tree); static bool gtHasRef(GenTree* tree, ssize_t lclNum); bool gtHasLocalsWithAddrOp(GenTree* tree); unsigned gtSetCallArgsOrder(const GenTreeCall::UseList& args, bool lateArgs, int* callCostEx, int* callCostSz); unsigned gtSetMultiOpOrder(GenTreeMultiOp* multiOp); void gtWalkOp(GenTree** op1, GenTree** op2, GenTree* base, bool constOnly); #ifdef DEBUG unsigned gtHashValue(GenTree* tree); GenTree* gtWalkOpEffectiveVal(GenTree* op); #endif void gtPrepareCost(GenTree* tree); bool gtIsLikelyRegVar(GenTree* tree); // Returns true iff the secondNode can be swapped with firstNode. bool gtCanSwapOrder(GenTree* firstNode, GenTree* secondNode); // Given an address expression, compute its costs and addressing mode opportunities, // and mark addressing mode candidates as GTF_DONT_CSE. // TODO-Throughput - Consider actually instantiating these early, to avoid // having to re-run the algorithm that looks for them (might also improve CQ). 
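    // Returns true if 'addr' was recognized as an addressing mode and its components were marked;
    // '*costEx' and '*costSz' are updated with the costs of the resulting address expression.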
bool gtMarkAddrMode(GenTree* addr, int* costEx, int* costSz, var_types type); unsigned gtSetEvalOrder(GenTree* tree); void gtSetStmtInfo(Statement* stmt); // Returns "true" iff "node" has any of the side effects in "flags". bool gtNodeHasSideEffects(GenTree* node, GenTreeFlags flags); // Returns "true" iff "tree" or its (transitive) children have any of the side effects in "flags". bool gtTreeHasSideEffects(GenTree* tree, GenTreeFlags flags); // Appends 'expr' in front of 'list' // 'list' will typically start off as 'nullptr' // when 'list' is non-null a GT_COMMA node is used to insert 'expr' GenTree* gtBuildCommaList(GenTree* list, GenTree* expr); void gtExtractSideEffList(GenTree* expr, GenTree** pList, GenTreeFlags GenTreeFlags = GTF_SIDE_EFFECT, bool ignoreRoot = false); GenTree* gtGetThisArg(GenTreeCall* call); // Static fields of struct types (and sometimes the types that those are reduced to) are represented by having the // static field contain an object pointer to the boxed struct. This simplifies the GC implementation...but // complicates the JIT somewhat. This predicate returns "true" iff a node with type "fieldNodeType", representing // the given "fldHnd", is such an object pointer. bool gtIsStaticFieldPtrToBoxedStruct(var_types fieldNodeType, CORINFO_FIELD_HANDLE fldHnd); // Return true if call is a recursive call; return false otherwise. // Note when inlining, this looks for calls back to the root method. bool gtIsRecursiveCall(GenTreeCall* call) { return gtIsRecursiveCall(call->gtCallMethHnd); } bool gtIsRecursiveCall(CORINFO_METHOD_HANDLE callMethodHandle) { return (callMethodHandle == impInlineRoot()->info.compMethodHnd); } //------------------------------------------------------------------------- GenTree* gtFoldExpr(GenTree* tree); GenTree* gtFoldExprConst(GenTree* tree); GenTree* gtFoldExprSpecial(GenTree* tree); GenTree* gtFoldBoxNullable(GenTree* tree); GenTree* gtFoldExprCompare(GenTree* tree); GenTree* gtCreateHandleCompare(genTreeOps oper, GenTree* op1, GenTree* op2, CorInfoInlineTypeCheck typeCheckInliningResult); GenTree* gtFoldExprCall(GenTreeCall* call); GenTree* gtFoldTypeCompare(GenTree* tree); GenTree* gtFoldTypeEqualityCall(bool isEq, GenTree* op1, GenTree* op2); // Options to control behavior of gtTryRemoveBoxUpstreamEffects enum BoxRemovalOptions { BR_REMOVE_AND_NARROW, // remove effects, minimize remaining work, return possibly narrowed source tree BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE, // remove effects and minimize remaining work, return type handle tree BR_REMOVE_BUT_NOT_NARROW, // remove effects, return original source tree BR_DONT_REMOVE, // check if removal is possible, return copy source tree BR_DONT_REMOVE_WANT_TYPE_HANDLE, // check if removal is possible, return type handle tree BR_MAKE_LOCAL_COPY // revise box to copy to temp local and return local's address }; GenTree* gtTryRemoveBoxUpstreamEffects(GenTree* tree, BoxRemovalOptions options = BR_REMOVE_AND_NARROW); GenTree* gtOptimizeEnumHasFlag(GenTree* thisOp, GenTree* flagOp); //------------------------------------------------------------------------- // Get the handle, if any. CORINFO_CLASS_HANDLE gtGetStructHandleIfPresent(GenTree* tree); // Get the handle, and assert if not found. CORINFO_CLASS_HANDLE gtGetStructHandle(GenTree* tree); // Get the handle for a ref type. 
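    // On return, '*pIsExact' is set if the handle describes the exact runtime type of the tree,
    // and '*pIsNonNull' is set if the tree is known to be non-null.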
CORINFO_CLASS_HANDLE gtGetClassHandle(GenTree* tree, bool* pIsExact, bool* pIsNonNull); // Get the class handle for an helper call CORINFO_CLASS_HANDLE gtGetHelperCallClassHandle(GenTreeCall* call, bool* pIsExact, bool* pIsNonNull); // Get the element handle for an array of ref type. CORINFO_CLASS_HANDLE gtGetArrayElementClassHandle(GenTree* array); // Get a class handle from a helper call argument CORINFO_CLASS_HANDLE gtGetHelperArgClassHandle(GenTree* array); // Get the class handle for a field CORINFO_CLASS_HANDLE gtGetFieldClassHandle(CORINFO_FIELD_HANDLE fieldHnd, bool* pIsExact, bool* pIsNonNull); // Check if this tree is a gc static base helper call bool gtIsStaticGCBaseHelperCall(GenTree* tree); //------------------------------------------------------------------------- // Functions to display the trees #ifdef DEBUG void gtDispNode(GenTree* tree, IndentStack* indentStack, _In_z_ const char* msg, bool isLIR); void gtDispConst(GenTree* tree); void gtDispLeaf(GenTree* tree, IndentStack* indentStack); void gtDispNodeName(GenTree* tree); #if FEATURE_MULTIREG_RET unsigned gtDispMultiRegCount(GenTree* tree); #endif void gtDispRegVal(GenTree* tree); void gtDispZeroFieldSeq(GenTree* tree); void gtDispVN(GenTree* tree); void gtDispCommonEndLine(GenTree* tree); enum IndentInfo { IINone, IIArc, IIArcTop, IIArcBottom, IIEmbedded, IIError, IndentInfoCount }; void gtDispChild(GenTree* child, IndentStack* indentStack, IndentInfo arcType, _In_opt_ const char* msg = nullptr, bool topOnly = false); void gtDispTree(GenTree* tree, IndentStack* indentStack = nullptr, _In_opt_ const char* msg = nullptr, bool topOnly = false, bool isLIR = false); void gtGetLclVarNameInfo(unsigned lclNum, const char** ilKindOut, const char** ilNameOut, unsigned* ilNumOut); int gtGetLclVarName(unsigned lclNum, char* buf, unsigned buf_remaining); char* gtGetLclVarName(unsigned lclNum); void gtDispLclVar(unsigned lclNum, bool padForBiggestDisp = true); void gtDispLclVarStructType(unsigned lclNum); void gtDispClassLayout(ClassLayout* layout, var_types type); void gtDispILLocation(const ILLocation& loc); void gtDispStmt(Statement* stmt, const char* msg = nullptr); void gtDispBlockStmts(BasicBlock* block); void gtGetArgMsg(GenTreeCall* call, GenTree* arg, unsigned argNum, char* bufp, unsigned bufLength); void gtGetLateArgMsg(GenTreeCall* call, GenTree* arg, int argNum, char* bufp, unsigned bufLength); void gtDispArgList(GenTreeCall* call, GenTree* lastCallOperand, IndentStack* indentStack); void gtDispAnyFieldSeq(FieldSeqNode* fieldSeq); void gtDispFieldSeq(FieldSeqNode* pfsn); void gtDispRange(LIR::ReadOnlyRange const& range); void gtDispTreeRange(LIR::Range& containingRange, GenTree* tree); void gtDispLIRNode(GenTree* node, const char* prefixMsg = nullptr); #endif // For tree walks enum fgWalkResult { WALK_CONTINUE, WALK_SKIP_SUBTREES, WALK_ABORT }; struct fgWalkData; typedef fgWalkResult(fgWalkPreFn)(GenTree** pTree, fgWalkData* data); typedef fgWalkResult(fgWalkPostFn)(GenTree** pTree, fgWalkData* data); static fgWalkPreFn gtMarkColonCond; static fgWalkPreFn gtClearColonCond; struct FindLinkData { GenTree* nodeToFind; GenTree** result; GenTree* parent; }; FindLinkData gtFindLink(Statement* stmt, GenTree* node); bool gtHasCatchArg(GenTree* tree); typedef ArrayStack<GenTree*> GenTreeStack; static bool gtHasCallOnStack(GenTreeStack* parentStack); //========================================================================= // BasicBlock functions #ifdef DEBUG // This is a debug flag we will use to assert when creating block 
during codegen // as this interferes with procedure splitting. If you know what you're doing, set // it to true before creating the block. (DEBUG only) bool fgSafeBasicBlockCreation; #endif BasicBlock* bbNewBasicBlock(BBjumpKinds jumpKind); void placeLoopAlignInstructions(); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX LclVarsInfo XX XX XX XX The variables to be used by the code generator. XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ // // For both PROMOTION_TYPE_NONE and PROMOTION_TYPE_DEPENDENT the struct will // be placed in the stack frame and it's fields must be laid out sequentially. // // For PROMOTION_TYPE_INDEPENDENT each of the struct's fields is replaced by // a local variable that can be enregistered or placed in the stack frame. // The fields do not need to be laid out sequentially // enum lvaPromotionType { PROMOTION_TYPE_NONE, // The struct local is not promoted PROMOTION_TYPE_INDEPENDENT, // The struct local is promoted, // and its field locals are independent of its parent struct local. PROMOTION_TYPE_DEPENDENT // The struct local is promoted, // but its field locals depend on its parent struct local. }; /*****************************************************************************/ enum FrameLayoutState { NO_FRAME_LAYOUT, INITIAL_FRAME_LAYOUT, PRE_REGALLOC_FRAME_LAYOUT, REGALLOC_FRAME_LAYOUT, TENTATIVE_FRAME_LAYOUT, FINAL_FRAME_LAYOUT }; public: RefCountState lvaRefCountState; // Current local ref count state bool lvaLocalVarRefCounted() const { return lvaRefCountState == RCS_NORMAL; } bool lvaTrackedFixed; // true: We cannot add new 'tracked' variable unsigned lvaCount; // total number of locals, which includes function arguments, // special arguments, IL local variables, and JIT temporary variables LclVarDsc* lvaTable; // variable descriptor table unsigned lvaTableCnt; // lvaTable size (>= lvaCount) unsigned lvaTrackedCount; // actual # of locals being tracked unsigned lvaTrackedCountInSizeTUnits; // min # of size_t's sufficient to hold a bit for all the locals being tracked #ifdef DEBUG VARSET_TP lvaTrackedVars; // set of tracked variables #endif #ifndef TARGET_64BIT VARSET_TP lvaLongVars; // set of long (64-bit) variables #endif VARSET_TP lvaFloatVars; // set of floating-point (32-bit and 64-bit) variables unsigned lvaCurEpoch; // VarSets are relative to a specific set of tracked var indices. // It that changes, this changes. VarSets from different epochs // cannot be meaningfully combined. unsigned GetCurLVEpoch() { return lvaCurEpoch; } // reverse map of tracked number to var number unsigned lvaTrackedToVarNumSize; unsigned* lvaTrackedToVarNum; #if DOUBLE_ALIGN #ifdef DEBUG // # of procs compiled a with double-aligned stack static unsigned s_lvaDoubleAlignedProcsCount; #endif #endif // Getters and setters for address-exposed and do-not-enregister local var properties. 
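    // Note that marking a local as address-exposed also forces it to be do-not-enregister.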
bool lvaVarAddrExposed(unsigned varNum) const; void lvaSetVarAddrExposed(unsigned varNum DEBUGARG(AddressExposedReason reason)); void lvaSetHiddenBufferStructArg(unsigned varNum); void lvaSetVarLiveInOutOfHandler(unsigned varNum); bool lvaVarDoNotEnregister(unsigned varNum); void lvSetMinOptsDoNotEnreg(); bool lvaEnregEHVars; bool lvaEnregMultiRegVars; void lvaSetVarDoNotEnregister(unsigned varNum DEBUGARG(DoNotEnregisterReason reason)); unsigned lvaVarargsHandleArg; #ifdef TARGET_X86 unsigned lvaVarargsBaseOfStkArgs; // Pointer (computed based on incoming varargs handle) to the start of the stack // arguments #endif // TARGET_X86 unsigned lvaInlinedPInvokeFrameVar; // variable representing the InlinedCallFrame unsigned lvaReversePInvokeFrameVar; // variable representing the reverse PInvoke frame #if FEATURE_FIXED_OUT_ARGS unsigned lvaPInvokeFrameRegSaveVar; // variable representing the RegSave for PInvoke inlining. #endif unsigned lvaMonAcquired; // boolean variable introduced into in synchronized methods // that tracks whether the lock has been taken unsigned lvaArg0Var; // The lclNum of arg0. Normally this will be info.compThisArg. // However, if there is a "ldarga 0" or "starg 0" in the IL, // we will redirect all "ldarg(a) 0" and "starg 0" to this temp. unsigned lvaInlineeReturnSpillTemp; // The temp to spill the non-VOID return expression // in case there are multiple BBJ_RETURN blocks in the inlinee // or if the inlinee has GC ref locals. #if FEATURE_FIXED_OUT_ARGS unsigned lvaOutgoingArgSpaceVar; // dummy TYP_LCLBLK var for fixed outgoing argument space PhasedVar<unsigned> lvaOutgoingArgSpaceSize; // size of fixed outgoing argument space #endif // FEATURE_FIXED_OUT_ARGS static unsigned GetOutgoingArgByteSize(unsigned sizeWithoutPadding) { return roundUp(sizeWithoutPadding, TARGET_POINTER_SIZE); } // Variable representing the return address. The helper-based tailcall // mechanism passes the address of the return address to a runtime helper // where it is used to detect tail-call chains. unsigned lvaRetAddrVar; #if defined(DEBUG) && defined(TARGET_XARCH) unsigned lvaReturnSpCheck; // Stores SP to confirm it is not corrupted on return. #endif // defined(DEBUG) && defined(TARGET_XARCH) #if defined(DEBUG) && defined(TARGET_X86) unsigned lvaCallSpCheck; // Stores SP to confirm it is not corrupted after every call. #endif // defined(DEBUG) && defined(TARGET_X86) bool lvaGenericsContextInUse; bool lvaKeepAliveAndReportThis(); // Synchronized instance method of a reference type, or // CORINFO_GENERICS_CTXT_FROM_THIS? bool lvaReportParamTypeArg(); // Exceptions and CORINFO_GENERICS_CTXT_FROM_PARAMTYPEARG? //------------------------------------------------------------------------- // All these frame offsets are inter-related and must be kept in sync #if !defined(FEATURE_EH_FUNCLETS) // This is used for the callable handlers unsigned lvaShadowSPslotsVar; // TYP_BLK variable for all the shadow SP slots #endif // FEATURE_EH_FUNCLETS int lvaCachedGenericContextArgOffs; int lvaCachedGenericContextArgOffset(); // For CORINFO_CALLCONV_PARAMTYPE and if generic context is passed as // THIS pointer #ifdef JIT32_GCENCODER unsigned lvaLocAllocSPvar; // variable which stores the value of ESP after the the last alloca/localloc #endif // JIT32_GCENCODER unsigned lvaNewObjArrayArgs; // variable with arguments for new MD array helper // TODO-Review: Prior to reg predict we reserve 24 bytes for Spill temps. 
// after the reg predict we will use a computed maxTmpSize // which is based upon the number of spill temps predicted by reg predict // All this is necessary because if we under-estimate the size of the spill // temps we could fail when encoding instructions that reference stack offsets for ARM. // // Pre codegen max spill temp size. static const unsigned MAX_SPILL_TEMP_SIZE = 24; //------------------------------------------------------------------------- unsigned lvaGetMaxSpillTempSize(); #ifdef TARGET_ARM bool lvaIsPreSpilled(unsigned lclNum, regMaskTP preSpillMask); #endif // TARGET_ARM void lvaAssignFrameOffsets(FrameLayoutState curState); void lvaFixVirtualFrameOffsets(); void lvaUpdateArgWithInitialReg(LclVarDsc* varDsc); void lvaUpdateArgsWithInitialReg(); void lvaAssignVirtualFrameOffsetsToArgs(); #ifdef UNIX_AMD64_ABI int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs, int* callerArgOffset); #else // !UNIX_AMD64_ABI int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs); #endif // !UNIX_AMD64_ABI void lvaAssignVirtualFrameOffsetsToLocals(); int lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, int stkOffs); #ifdef TARGET_AMD64 // Returns true if compCalleeRegsPushed (including RBP if used as frame pointer) is even. bool lvaIsCalleeSavedIntRegCountEven(); #endif void lvaAlignFrame(); void lvaAssignFrameOffsetsToPromotedStructs(); int lvaAllocateTemps(int stkOffs, bool mustDoubleAlign); #ifdef DEBUG void lvaDumpRegLocation(unsigned lclNum); void lvaDumpFrameLocation(unsigned lclNum); void lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t refCntWtdWidth = 6); void lvaTableDump(FrameLayoutState curState = NO_FRAME_LAYOUT); // NO_FRAME_LAYOUT means use the current frame // layout state defined by lvaDoneFrameLayout #endif // Limit frames size to 1GB. The maximum is 2GB in theory - make it intentionally smaller // to avoid bugs from borderline cases. #define MAX_FrameSize 0x3FFFFFFF void lvaIncrementFrameSize(unsigned size); unsigned lvaFrameSize(FrameLayoutState curState); // Returns the caller-SP-relative offset for the SP/FP relative offset determined by FP based. int lvaToCallerSPRelativeOffset(int offs, bool isFpBased, bool forRootFrame = true) const; // Returns the caller-SP-relative offset for the local variable "varNum." int lvaGetCallerSPRelativeOffset(unsigned varNum); // Returns the SP-relative offset for the local variable "varNum". Illegal to ask this for functions with localloc. int lvaGetSPRelativeOffset(unsigned varNum); int lvaToInitialSPRelativeOffset(unsigned offset, bool isFpBased); int lvaGetInitialSPRelativeOffset(unsigned varNum); // True if this is an OSR compilation and this local is potentially // located on the original method stack frame. 
bool lvaIsOSRLocal(unsigned varNum); //------------------------ For splitting types ---------------------------- void lvaInitTypeRef(); void lvaInitArgs(InitVarDscInfo* varDscInfo); void lvaInitThisPtr(InitVarDscInfo* varDscInfo); void lvaInitRetBuffArg(InitVarDscInfo* varDscInfo, bool useFixedRetBufReg); void lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, unsigned takeArgs); void lvaInitGenericsCtxt(InitVarDscInfo* varDscInfo); void lvaInitVarArgsHandle(InitVarDscInfo* varDscInfo); void lvaInitVarDsc(LclVarDsc* varDsc, unsigned varNum, CorInfoType corInfoType, CORINFO_CLASS_HANDLE typeHnd, CORINFO_ARG_LIST_HANDLE varList, CORINFO_SIG_INFO* varSig); static unsigned lvaTypeRefMask(var_types type); var_types lvaGetActualType(unsigned lclNum); var_types lvaGetRealType(unsigned lclNum); //------------------------------------------------------------------------- void lvaInit(); LclVarDsc* lvaGetDesc(unsigned lclNum) { assert(lclNum < lvaCount); return &lvaTable[lclNum]; } LclVarDsc* lvaGetDesc(unsigned lclNum) const { assert(lclNum < lvaCount); return &lvaTable[lclNum]; } LclVarDsc* lvaGetDesc(const GenTreeLclVarCommon* lclVar) { return lvaGetDesc(lclVar->GetLclNum()); } unsigned lvaTrackedIndexToLclNum(unsigned trackedIndex) { assert(trackedIndex < lvaTrackedCount); unsigned lclNum = lvaTrackedToVarNum[trackedIndex]; assert(lclNum < lvaCount); return lclNum; } LclVarDsc* lvaGetDescByTrackedIndex(unsigned trackedIndex) { return lvaGetDesc(lvaTrackedIndexToLclNum(trackedIndex)); } unsigned lvaGetLclNum(const LclVarDsc* varDsc) { assert((lvaTable <= varDsc) && (varDsc < lvaTable + lvaCount)); // varDsc must point within the table assert(((char*)varDsc - (char*)lvaTable) % sizeof(LclVarDsc) == 0); // varDsc better not point in the middle of a variable unsigned varNum = (unsigned)(varDsc - lvaTable); assert(varDsc == &lvaTable[varNum]); return varNum; } unsigned lvaLclSize(unsigned varNum); unsigned lvaLclExactSize(unsigned varNum); bool lvaHaveManyLocals() const; unsigned lvaGrabTemp(bool shortLifetime DEBUGARG(const char* reason)); unsigned lvaGrabTemps(unsigned cnt DEBUGARG(const char* reason)); unsigned lvaGrabTempWithImplicitUse(bool shortLifetime DEBUGARG(const char* reason)); void lvaSortByRefCount(); void lvaMarkLocalVars(); // Local variable ref-counting void lvaComputeRefCounts(bool isRecompute, bool setSlotNumbers); void lvaMarkLocalVars(BasicBlock* block, bool isRecompute); void lvaAllocOutgoingArgSpaceVar(); // Set up lvaOutgoingArgSpaceVar VARSET_VALRET_TP lvaStmtLclMask(Statement* stmt); #ifdef DEBUG struct lvaStressLclFldArgs { Compiler* m_pCompiler; bool m_bFirstPass; }; static fgWalkPreFn lvaStressLclFldCB; void lvaStressLclFld(); void lvaDispVarSet(VARSET_VALARG_TP set, VARSET_VALARG_TP allVars); void lvaDispVarSet(VARSET_VALARG_TP set); #endif #ifdef TARGET_ARM int lvaFrameAddress(int varNum, bool mustBeFPBased, regNumber* pBaseReg, int addrModeOffset, bool isFloatUsage); #else int lvaFrameAddress(int varNum, bool* pFPbased); #endif bool lvaIsParameter(unsigned varNum); bool lvaIsRegArgument(unsigned varNum); bool lvaIsOriginalThisArg(unsigned varNum); // Is this varNum the original this argument? bool lvaIsOriginalThisReadOnly(); // return true if there is no place in the code // that writes to arg0 // For x64 this is 3, 5, 6, 7, >8 byte structs that are passed by reference. // For ARM64, this is structs larger than 16 bytes that are passed by reference. 
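    // Implicit byrefs can only be parameters; on targets other than x64 and ARM64 this always returns false.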
bool lvaIsImplicitByRefLocal(unsigned varNum) { #if defined(TARGET_AMD64) || defined(TARGET_ARM64) LclVarDsc* varDsc = lvaGetDesc(varNum); if (varDsc->lvIsImplicitByRef) { assert(varDsc->lvIsParam); assert(varTypeIsStruct(varDsc) || (varDsc->lvType == TYP_BYREF)); return true; } #endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) return false; } // Returns true if this local var is a multireg struct bool lvaIsMultiregStruct(LclVarDsc* varDsc, bool isVararg); // If the local is a TYP_STRUCT, get/set a class handle describing it CORINFO_CLASS_HANDLE lvaGetStruct(unsigned varNum); void lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool unsafeValueClsCheck, bool setTypeInfo = true); void lvaSetStructUsedAsVarArg(unsigned varNum); // If the local is TYP_REF, set or update the associated class information. void lvaSetClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact = false); void lvaSetClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHandle = nullptr); void lvaUpdateClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact = false); void lvaUpdateClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHandle = nullptr); #define MAX_NumOfFieldsInPromotableStruct 4 // Maximum number of fields in promotable struct // Info about struct type fields. struct lvaStructFieldInfo { CORINFO_FIELD_HANDLE fldHnd; unsigned char fldOffset; unsigned char fldOrdinal; var_types fldType; unsigned fldSize; CORINFO_CLASS_HANDLE fldTypeHnd; lvaStructFieldInfo() : fldHnd(nullptr), fldOffset(0), fldOrdinal(0), fldType(TYP_UNDEF), fldSize(0), fldTypeHnd(nullptr) { } }; // Info about a struct type, instances of which may be candidates for promotion. struct lvaStructPromotionInfo { CORINFO_CLASS_HANDLE typeHnd; bool canPromote; bool containsHoles; bool customLayout; bool fieldsSorted; unsigned char fieldCnt; lvaStructFieldInfo fields[MAX_NumOfFieldsInPromotableStruct]; lvaStructPromotionInfo(CORINFO_CLASS_HANDLE typeHnd = nullptr) : typeHnd(typeHnd) , canPromote(false) , containsHoles(false) , customLayout(false) , fieldsSorted(false) , fieldCnt(0) { } }; struct lvaFieldOffsetCmp { bool operator()(const lvaStructFieldInfo& field1, const lvaStructFieldInfo& field2); }; // This class is responsible for checking validity and profitability of struct promotion. // If it is both legal and profitable, then TryPromoteStructVar promotes the struct and initializes // nessesary information for fgMorphStructField to use. 
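    // Independent promotion replaces uses of the struct local with its new field locals, while dependent
    // promotion keeps the struct on the frame and lays the field locals out on top of it (see lvaPromotionType).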
class StructPromotionHelper { public: StructPromotionHelper(Compiler* compiler); bool CanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd); bool TryPromoteStructVar(unsigned lclNum); void Clear() { structPromotionInfo.typeHnd = NO_CLASS_HANDLE; } #ifdef DEBUG void CheckRetypedAsScalar(CORINFO_FIELD_HANDLE fieldHnd, var_types requestedType); #endif // DEBUG private: bool CanPromoteStructVar(unsigned lclNum); bool ShouldPromoteStructVar(unsigned lclNum); void PromoteStructVar(unsigned lclNum); void SortStructFields(); bool CanConstructAndPromoteField(lvaStructPromotionInfo* structPromotionInfo); lvaStructFieldInfo GetFieldInfo(CORINFO_FIELD_HANDLE fieldHnd, BYTE ordinal); bool TryPromoteStructField(lvaStructFieldInfo& outerFieldInfo); private: Compiler* compiler; lvaStructPromotionInfo structPromotionInfo; #ifdef DEBUG typedef JitHashTable<CORINFO_FIELD_HANDLE, JitPtrKeyFuncs<CORINFO_FIELD_STRUCT_>, var_types> RetypedAsScalarFieldsMap; RetypedAsScalarFieldsMap retypedFieldsMap; #endif // DEBUG }; StructPromotionHelper* structPromotionHelper; unsigned lvaGetFieldLocal(const LclVarDsc* varDsc, unsigned int fldOffset); lvaPromotionType lvaGetPromotionType(const LclVarDsc* varDsc); lvaPromotionType lvaGetPromotionType(unsigned varNum); lvaPromotionType lvaGetParentPromotionType(const LclVarDsc* varDsc); lvaPromotionType lvaGetParentPromotionType(unsigned varNum); bool lvaIsFieldOfDependentlyPromotedStruct(const LclVarDsc* varDsc); bool lvaIsGCTracked(const LclVarDsc* varDsc); #if defined(FEATURE_SIMD) bool lvaMapSimd12ToSimd16(const LclVarDsc* varDsc) { assert(varDsc->lvType == TYP_SIMD12); assert(varDsc->lvExactSize == 12); #if defined(TARGET_64BIT) assert(compMacOsArm64Abi() || varDsc->lvSize() == 16); #endif // defined(TARGET_64BIT) // We make local variable SIMD12 types 16 bytes instead of just 12. // lvSize() will return 16 bytes for SIMD12, even for fields. // However, we can't do that mapping if the var is a dependently promoted struct field. // Such a field must remain its exact size within its parent struct unless it is a single // field *and* it is the only field in a struct of 16 bytes. if (varDsc->lvSize() != 16) { return false; } if (lvaIsFieldOfDependentlyPromotedStruct(varDsc)) { LclVarDsc* parentVarDsc = lvaGetDesc(varDsc->lvParentLcl); return (parentVarDsc->lvFieldCnt == 1) && (parentVarDsc->lvSize() == 16); } return true; } #endif // defined(FEATURE_SIMD) unsigned lvaGSSecurityCookie; // LclVar number bool lvaTempsHaveLargerOffsetThanVars(); // Returns "true" iff local variable "lclNum" is in SSA form. bool lvaInSsa(unsigned lclNum) { assert(lclNum < lvaCount); return lvaTable[lclNum].lvInSsa; } unsigned lvaStubArgumentVar; // variable representing the secret stub argument coming in EAX #if defined(FEATURE_EH_FUNCLETS) unsigned lvaPSPSym; // variable representing the PSPSym #endif InlineInfo* impInlineInfo; // Only present for inlinees InlineStrategy* m_inlineStrategy; InlineContext* compInlineContext; // Always present // The Compiler* that is the root of the inlining tree of which "this" is a member. Compiler* impInlineRoot(); #if defined(DEBUG) || defined(INLINE_DATA) unsigned __int64 getInlineCycleCount() { return m_compCycles; } #endif // defined(DEBUG) || defined(INLINE_DATA) bool fgNoStructPromotion; // Set to TRUE to turn off struct promotion for this method. bool fgNoStructParamPromotion; // Set to TRUE to turn off struct promotion for parameters this method. 
//========================================================================= // PROTECTED //========================================================================= protected: //---------------- Local variable ref-counting ---------------------------- void lvaMarkLclRefs(GenTree* tree, BasicBlock* block, Statement* stmt, bool isRecompute); bool IsDominatedByExceptionalEntry(BasicBlock* block); void SetVolatileHint(LclVarDsc* varDsc); // Keeps the mapping from SSA #'s to VN's for the implicit memory variables. SsaDefArray<SsaMemDef> lvMemoryPerSsaData; public: // Returns the address of the per-Ssa data for memory at the given ssaNum (which is required // not to be the SsaConfig::RESERVED_SSA_NUM, which indicates that the variable is // not an SSA variable). SsaMemDef* GetMemoryPerSsaData(unsigned ssaNum) { return lvMemoryPerSsaData.GetSsaDef(ssaNum); } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Importer XX XX XX XX Imports the given method and converts it to semantic trees XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ private: // For prefixFlags enum { PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix PREFIX_TAILCALL_IMPLICIT = 0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix PREFIX_TAILCALL_STRESS = 0x00000100, // call doesn't "tail" IL prefix but is treated as explicit because of tail call stress PREFIX_TAILCALL = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT | PREFIX_TAILCALL_STRESS), PREFIX_VOLATILE = 0x00001000, PREFIX_UNALIGNED = 0x00010000, PREFIX_CONSTRAINED = 0x00100000, PREFIX_READONLY = 0x01000000 }; static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix); static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp); static bool impOpcodeIsCallOpcode(OPCODE opcode); public: void impInit(); void impImport(); CORINFO_CLASS_HANDLE impGetRefAnyClass(); CORINFO_CLASS_HANDLE impGetRuntimeArgumentHandle(); CORINFO_CLASS_HANDLE impGetTypeHandleClass(); CORINFO_CLASS_HANDLE impGetStringClass(); CORINFO_CLASS_HANDLE impGetObjectClass(); // Returns underlying type of handles returned by ldtoken instruction var_types GetRuntimeHandleUnderlyingType() { // RuntimeTypeHandle is backed by raw pointer on CoreRT and by object reference on other runtimes return IsTargetAbi(CORINFO_CORERT_ABI) ? TYP_I_IMPL : TYP_REF; } void impDevirtualizeCall(GenTreeCall* call, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_METHOD_HANDLE* method, unsigned* methodFlags, CORINFO_CONTEXT_HANDLE* contextHandle, CORINFO_CONTEXT_HANDLE* exactContextHandle, bool isLateDevirtualization, bool isExplicitTailCall, IL_OFFSET ilOffset = BAD_IL_OFFSET); //========================================================================= // PROTECTED //========================================================================= protected: //-------------------- Stack manipulation --------------------------------- unsigned impStkSize; // Size of the full stack #define SMALL_STACK_SIZE 16 // number of elements in impSmallStack struct SavedStack // used to save/restore stack contents. 
{ unsigned ssDepth; // number of values on stack StackEntry* ssTrees; // saved tree values }; bool impIsPrimitive(CorInfoType type); bool impILConsumesAddr(const BYTE* codeAddr); void impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind); void impPushOnStack(GenTree* tree, typeInfo ti); void impPushNullObjRefOnStack(); StackEntry impPopStack(); StackEntry& impStackTop(unsigned n = 0); unsigned impStackHeight(); void impSaveStackState(SavedStack* savePtr, bool copy); void impRestoreStackState(SavedStack* savePtr); GenTree* impImportLdvirtftn(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); int impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken, const BYTE* codeAddr, const BYTE* codeEndp, bool makeInlineObservation = false); void impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken); void impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); bool impCanPInvokeInline(); bool impCanPInvokeInlineCallSite(BasicBlock* block); void impCheckForPInvokeCall( GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block); GenTreeCall* impImportIndirectCall(CORINFO_SIG_INFO* sig, const DebugInfo& di = DebugInfo()); void impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig); void impInsertHelperCall(CORINFO_HELPER_DESC* helperCall); void impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall); void impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall); var_types impImportCall(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a // type parameter? 
GenTree* newobjThis, int prefixFlags, CORINFO_CALL_INFO* callInfo, IL_OFFSET rawILOffset); CORINFO_CLASS_HANDLE impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE specialIntrinsicHandle); bool impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo, CorInfoCallConvExtension callConv); GenTree* impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd); GenTree* impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension unmgdCallConv); #ifdef DEBUG var_types impImportJitTestLabelMark(int numArgs); #endif // DEBUG GenTree* impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken); GenTree* impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp); GenTree* impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS access, CORINFO_FIELD_INFO* pFieldInfo, var_types lclTyp); static void impBashVarAddrsToI(GenTree* tree1, GenTree* tree2 = nullptr); GenTree* impImplicitIorI4Cast(GenTree* tree, var_types dstTyp); GenTree* impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp); void impImportLeave(BasicBlock* block); void impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr); GenTree* impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom); // Mirrors StringComparison.cs enum StringComparison { Ordinal = 4, OrdinalIgnoreCase = 5 }; enum StringComparisonJoint { Eq, // (d1 == cns1) && (s2 == cns2) Xor, // (d1 ^ cns1) | (s2 ^ cns2) }; GenTree* impStringEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* sig, unsigned methodFlags); GenTree* impSpanEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* sig, unsigned methodFlags); GenTree* impExpandHalfConstEquals(GenTreeLclVar* data, GenTree* lengthFld, bool checkForNull, bool startsWith, WCHAR* cnsData, int len, int dataOffset, StringComparison cmpMode); GenTree* impCreateCompareInd(GenTreeLclVar* obj, var_types type, ssize_t offset, ssize_t value, StringComparison ignoreCase, StringComparisonJoint joint = Eq); GenTree* impExpandHalfConstEqualsSWAR( GenTreeLclVar* data, WCHAR* cns, int len, int dataOffset, StringComparison cmpMode); GenTree* impExpandHalfConstEqualsSIMD( GenTreeLclVar* data, WCHAR* cns, int len, int dataOffset, StringComparison cmpMode); GenTreeStrCon* impGetStrConFromSpan(GenTree* span); GenTree* impIntrinsic(GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, unsigned methodFlags, int memberRef, bool readonlyCall, bool tailCall, CORINFO_RESOLVED_TOKEN* pContstrainedResolvedToken, CORINFO_THIS_TRANSFORM constraintCallThisTransform, NamedIntrinsic* pIntrinsicName, bool* isSpecialIntrinsic = nullptr); GenTree* impMathIntrinsic(CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, var_types callType, NamedIntrinsic intrinsicName, bool tailCall); NamedIntrinsic lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method); GenTree* impUnsupportedNamedIntrinsic(unsigned helper, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, bool mustExpand); #ifdef FEATURE_HW_INTRINSICS GenTree* impHWIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, bool mustExpand); GenTree* impSimdAsHWIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, GenTree* newobjThis); protected: bool compSupportsHWIntrinsic(CORINFO_InstructionSet isa); GenTree* impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, var_types retType, CorInfoType simdBaseJitType, 
unsigned simdSize, GenTree* newobjThis); GenTree* impSpecialIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType, var_types retType, unsigned simdSize); GenTree* getArgForHWIntrinsic(var_types argType, CORINFO_CLASS_HANDLE argClass, bool expectAddr = false, GenTree* newobjThis = nullptr); GenTree* impNonConstFallback(NamedIntrinsic intrinsic, var_types simdType, CorInfoType simdBaseJitType); GenTree* addRangeCheckIfNeeded( NamedIntrinsic intrinsic, GenTree* immOp, bool mustExpand, int immLowerBound, int immUpperBound); GenTree* addRangeCheckForHWIntrinsic(GenTree* immOp, int immLowerBound, int immUpperBound); #ifdef TARGET_XARCH GenTree* impBaseIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType, var_types retType, unsigned simdSize); GenTree* impSSEIntrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); GenTree* impSSE2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); GenTree* impAvxOrAvx2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); GenTree* impBMI1OrBMI2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); #endif // TARGET_XARCH #endif // FEATURE_HW_INTRINSICS GenTree* impArrayAccessIntrinsic(CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, NamedIntrinsic intrinsicName); GenTree* impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig); GenTree* impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig); GenTree* impKeepAliveIntrinsic(GenTree* objToKeepAlive); GenTree* impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); GenTree* impTransformThis(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, CORINFO_THIS_TRANSFORM transform); //----------------- Manipulating the trees and stmts ---------------------- Statement* impStmtList; // Statements for the BB being imported. Statement* impLastStmt; // The last statement for the current BB. 
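// Illustrative sketch (hypothetical importer call site): a side-effecting tree is wrapped in a Statement
// and appended to the list above via the helpers declared below, e.g.
//   impAppendTree(sideEffTree, (unsigned)CHECK_SPILL_ALL, impCurStmtDI);
// where the CHECK_SPILL_ALL level asks the importer to spill the evaluation stack as needed so that
// evaluation order is preserved.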
public: enum { CHECK_SPILL_ALL = -1, CHECK_SPILL_NONE = -2 }; void impBeginTreeList(); void impEndTreeList(BasicBlock* block, Statement* firstStmt, Statement* lastStmt); void impEndTreeList(BasicBlock* block); void impAppendStmtCheck(Statement* stmt, unsigned chkLevel); void impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsumedDebugInfo = true); void impAppendStmt(Statement* stmt); void impInsertStmtBefore(Statement* stmt, Statement* stmtBefore); Statement* impAppendTree(GenTree* tree, unsigned chkLevel, const DebugInfo& di, bool checkConsumedDebugInfo = true); void impInsertTreeBefore(GenTree* tree, const DebugInfo& di, Statement* stmtBefore); void impAssignTempGen(unsigned tmp, GenTree* val, unsigned curLevel = (unsigned)CHECK_SPILL_NONE, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); void impAssignTempGen(unsigned tmpNum, GenTree* val, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); Statement* impExtractLastStmt(); GenTree* impCloneExpr(GenTree* tree, GenTree** clone, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt DEBUGARG(const char* reason)); GenTree* impAssignStruct(GenTree* dest, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); GenTree* impAssignStructPtr(GenTree* dest, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); GenTree* impGetStructAddr(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool willDeref); var_types impNormStructType(CORINFO_CLASS_HANDLE structHnd, CorInfoType* simdBaseJitType = nullptr); GenTree* impNormStructVal(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool forceNormalization = false); GenTree* impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool* pRuntimeLookup = nullptr, bool mustRestoreHandle = false, bool importParent = false); GenTree* impParentClassTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool* pRuntimeLookup = nullptr, bool mustRestoreHandle = false) { return impTokenToHandle(pResolvedToken, pRuntimeLookup, mustRestoreHandle, true); } GenTree* impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags flags, void* compileTimeHandle); GenTree* getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind); GenTree* impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle); GenTree* impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup, GenTreeFlags flags, void* compileTimeHandle); GenTreeCall* impReadyToRunHelperToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoHelpFunc helper, var_types type, GenTreeCall::Use* args = nullptr, CORINFO_LOOKUP_KIND* pGenericLookupKind = nullptr); bool impIsCastHelperEligibleForClassProbe(GenTree* tree); bool impIsCastHelperMayHaveProfileData(GenTree* tree); GenTree* impCastClassOrIsInstToTree( GenTree* op1, GenTree* op2, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass, IL_OFFSET ilOffset); GenTree* impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass); bool VarTypeIsMultiByteAndCanEnreg(var_types type, CORINFO_CLASS_HANDLE typeClass, unsigned* typeSize, bool forReturn, bool isVarArg, 
CorInfoCallConvExtension callConv); bool IsIntrinsicImplementedByUserCall(NamedIntrinsic intrinsicName); bool IsTargetIntrinsic(NamedIntrinsic intrinsicName); bool IsMathIntrinsic(NamedIntrinsic intrinsicName); bool IsMathIntrinsic(GenTree* tree); private: //----------------- Importing the method ---------------------------------- CORINFO_CONTEXT_HANDLE impTokenLookupContextHandle; // The context used for looking up tokens. #ifdef DEBUG unsigned impCurOpcOffs; const char* impCurOpcName; bool impNestedStackSpill; // For displaying instrs with generated native code (-n:B) Statement* impLastILoffsStmt; // oldest stmt added for which we did not call SetLastILOffset(). void impNoteLastILoffs(); #endif // Debug info of current statement being imported. It gets set to contain // no IL location (!impCurStmtDI.GetLocation().IsValid) after it has been // set in the appended trees. Then it gets updated at IL instructions for // which we have to report mapping info. // It will always contain the current inline context. DebugInfo impCurStmtDI; DebugInfo impCreateDIWithCurrentStackInfo(IL_OFFSET offs, bool isCall); void impCurStmtOffsSet(IL_OFFSET offs); void impNoteBranchOffs(); unsigned impInitBlockLineInfo(); bool impIsThis(GenTree* obj); bool impIsLDFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr); bool impIsDUP_LDVIRTFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr); bool impIsAnySTLOC(OPCODE opcode) { return ((opcode == CEE_STLOC) || (opcode == CEE_STLOC_S) || ((opcode >= CEE_STLOC_0) && (opcode <= CEE_STLOC_3))); } GenTreeCall::Use* impPopCallArgs(unsigned count, CORINFO_SIG_INFO* sig, GenTreeCall::Use* prefixArgs = nullptr); bool impCheckImplicitArgumentCoercion(var_types sigType, var_types nodeType) const; GenTreeCall::Use* impPopReverseCallArgs(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount = 0); //---------------- Spilling the importer stack ---------------------------- // The maximum number of bytes of IL processed without clean stack state. // It allows to limit the maximum tree size and depth. static const unsigned MAX_TREE_SIZE = 200; bool impCanSpillNow(OPCODE prevOpcode); struct PendingDsc { PendingDsc* pdNext; BasicBlock* pdBB; SavedStack pdSavedStack; ThisInitState pdThisPtrInit; }; PendingDsc* impPendingList; // list of BBs currently waiting to be imported. PendingDsc* impPendingFree; // Freed up dscs that can be reused // We keep a byte-per-block map (dynamically extended) in the top-level Compiler object of a compilation. JitExpandArray<BYTE> impPendingBlockMembers; // Return the byte for "b" (allocating/extending impPendingBlockMembers if necessary.) // Operates on the map in the top-level ancestor. BYTE impGetPendingBlockMember(BasicBlock* blk) { return impInlineRoot()->impPendingBlockMembers.Get(blk->bbInd()); } // Set the byte for "b" to "val" (allocating/extending impPendingBlockMembers if necessary.) // Operates on the map in the top-level ancestor. 
void impSetPendingBlockMember(BasicBlock* blk, BYTE val) { impInlineRoot()->impPendingBlockMembers.Set(blk->bbInd(), val); } bool impCanReimport; bool impSpillStackEntry(unsigned level, unsigned varNum #ifdef DEBUG , bool bAssertOnRecursion, const char* reason #endif ); void impSpillStackEnsure(bool spillLeaves = false); void impEvalSideEffects(); void impSpillSpecialSideEff(); void impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason)); void impSpillValueClasses(); void impSpillEvalStack(); static fgWalkPreFn impFindValueClasses; void impSpillLclRefs(ssize_t lclNum); BasicBlock* impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter); bool impBlockIsInALoop(BasicBlock* block); void impImportBlockCode(BasicBlock* block); void impReimportMarkBlock(BasicBlock* block); void impReimportMarkSuccessors(BasicBlock* block); void impVerifyEHBlock(BasicBlock* block, bool isTryStart); void impImportBlockPending(BasicBlock* block); // Similar to impImportBlockPending, but assumes that block has already been imported once and is being // reimported for some reason. It specifically does *not* look at verCurrentState to set the EntryState // for the block, but instead, just re-uses the block's existing EntryState. void impReimportBlockPending(BasicBlock* block); var_types impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2); void impImportBlock(BasicBlock* block); // Assumes that "block" is a basic block that completes with a non-empty stack. We will assign the values // on the stack to local variables (the "spill temp" variables). The successor blocks will assume that // its incoming stack contents are in those locals. This requires "block" and its successors to agree on // the variables that will be used -- and for all the predecessors of those successors, and the // successors of those predecessors, etc. Call such a set of blocks closed under alternating // successor/predecessor edges a "spill clique." A block is a "predecessor" or "successor" member of the // clique (or, conceivably, both). Each block has a specified sequence of incoming and outgoing spill // temps. If "block" already has its outgoing spill temps assigned (they are always a contiguous series // of local variable numbers, so we represent them with the base local variable number), returns that. // Otherwise, picks a set of spill temps, and propagates this choice to all blocks in the spill clique of // which "block" is a member (asserting, in debug mode, that no block in this clique had its spill temps // chosen already. More precisely, that the incoming or outgoing spill temps are not chosen, depending // on which kind of member of the clique the block is). unsigned impGetSpillTmpBase(BasicBlock* block); // Assumes that "block" is a basic block that completes with a non-empty stack. We have previously // assigned the values on the stack to local variables (the "spill temp" variables). The successor blocks // will assume that its incoming stack contents are in those locals. This requires "block" and its // successors to agree on the variables and their types that will be used. The CLI spec allows implicit // conversions between 'int' and 'native int' or 'float' and 'double' stack types. So one predecessor can // push an int and another can push a native int. 
For 64-bit we have chosen to implement this by typing // the "spill temp" as native int, and then importing (or re-importing as needed) so that all the // predecessors in the "spill clique" push a native int (sign-extending if needed), and all the // successors receive a native int. Similarly float and double are unified to double. // This routine is called after a type-mismatch is detected, and it will walk the spill clique to mark // blocks for re-importation as appropriate (both successors, so they get the right incoming type, and // predecessors, so they insert an upcast if needed). void impReimportSpillClique(BasicBlock* block); // When we compute a "spill clique" (see above) these byte-maps are allocated to have a byte per basic // block, and represent the predecessor and successor members of the clique currently being computed. // *** Access to these will need to be locked in a parallel compiler. JitExpandArray<BYTE> impSpillCliquePredMembers; JitExpandArray<BYTE> impSpillCliqueSuccMembers; enum SpillCliqueDir { SpillCliquePred, SpillCliqueSucc }; // Abstract class for receiving a callback while walking a spill clique class SpillCliqueWalker { public: virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk) = 0; }; // This class is used for setting the bbStkTempsIn and bbStkTempsOut on the blocks within a spill clique class SetSpillTempsBase : public SpillCliqueWalker { unsigned m_baseTmp; public: SetSpillTempsBase(unsigned baseTmp) : m_baseTmp(baseTmp) { } virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk); }; // This class is used for implementing impReimportSpillClique part on each block within the spill clique class ReimportSpillClique : public SpillCliqueWalker { Compiler* m_pComp; public: ReimportSpillClique(Compiler* pComp) : m_pComp(pComp) { } virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk); }; // This is the heart of the algorithm for walking spill cliques. It invokes callback->Visit for each // predecessor or successor within the spill clique void impWalkSpillCliqueFromPred(BasicBlock* pred, SpillCliqueWalker* callback); // For a BasicBlock that has already been imported, the EntryState has an array of GenTrees for the // incoming locals. This walks that list and resets the types of the GenTrees to match the types of // the VarDscs. They get out of sync when we have int/native int issues (see impReimportSpillClique). void impRetypeEntryStateTemps(BasicBlock* blk); BYTE impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk); void impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val); void impPushVar(GenTree* op, typeInfo tiRetVal); GenTreeLclVar* impCreateLocalNode(unsigned lclNum DEBUGARG(IL_OFFSET offset)); void impLoadVar(unsigned lclNum, IL_OFFSET offset, const typeInfo& tiRetVal); void impLoadVar(unsigned lclNum, IL_OFFSET offset) { impLoadVar(lclNum, offset, lvaGetDesc(lclNum)->lvVerTypeInfo); } void impLoadArg(unsigned ilArgNum, IL_OFFSET offset); void impLoadLoc(unsigned ilLclNum, IL_OFFSET offset); bool impReturnInstruction(int prefixFlags, OPCODE& opcode); #ifdef TARGET_ARM void impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* op, CORINFO_CLASS_HANDLE hClass); #endif // A free list of linked list nodes used to represent to-do stacks of basic blocks.
struct BlockListNode { BasicBlock* m_blk; BlockListNode* m_next; BlockListNode(BasicBlock* blk, BlockListNode* next = nullptr) : m_blk(blk), m_next(next) { } void* operator new(size_t sz, Compiler* comp); }; BlockListNode* impBlockListNodeFreeList; void FreeBlockListNode(BlockListNode* node); bool impIsValueType(typeInfo* pTypeInfo); var_types mangleVarArgsType(var_types type); regNumber getCallArgIntRegister(regNumber floatReg); regNumber getCallArgFloatRegister(regNumber intReg); #if defined(DEBUG) static unsigned jitTotalMethodCompiled; #endif #ifdef DEBUG static LONG jitNestingLevel; #endif // DEBUG static bool impIsAddressInLocal(const GenTree* tree, GenTree** lclVarTreeOut = nullptr); void impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult); // STATIC inlining decision based on the IL code. void impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle, CORINFO_METHOD_INFO* methInfo, bool forceInline, InlineResult* inlineResult); void impCheckCanInline(GenTreeCall* call, CORINFO_METHOD_HANDLE fncHandle, unsigned methAttr, CORINFO_CONTEXT_HANDLE exactContextHnd, InlineCandidateInfo** ppInlineCandidateInfo, InlineResult* inlineResult); void impInlineRecordArgInfo(InlineInfo* pInlineInfo, GenTree* curArgVal, unsigned argNum, InlineResult* inlineResult); void impInlineInitVars(InlineInfo* pInlineInfo); unsigned impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason)); GenTree* impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclTypeInfo); bool impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo); bool impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree* additionalTree, GenTreeCall::Use* additionalCallArgs, GenTree* dereferencedAddress, InlArgInfo* inlArgInfo); void impMarkInlineCandidate(GenTree* call, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo); void impMarkInlineCandidateHelper(GenTreeCall* call, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo); bool impTailCallRetTypeCompatible(bool allowWidening, var_types callerRetType, CORINFO_CLASS_HANDLE callerRetTypeClass, CorInfoCallConvExtension callerCallConv, var_types calleeRetType, CORINFO_CLASS_HANDLE calleeRetTypeClass, CorInfoCallConvExtension calleeCallConv); bool impIsTailCallILPattern( bool tailPrefixed, OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, bool isRecursive); bool impIsImplicitTailCallCandidate( OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive); bool impIsClassExact(CORINFO_CLASS_HANDLE classHnd); bool impCanSkipCovariantStoreCheck(GenTree* value, GenTree* array); CORINFO_RESOLVED_TOKEN* impAllocateToken(const CORINFO_RESOLVED_TOKEN& token); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX FlowGraph XX XX XX XX Info about the basic-blocks, their contents and the flow analysis XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: BasicBlock* fgFirstBB; // Beginning of the basic block list BasicBlock* fgLastBB; // End of the basic block list BasicBlock* fgFirstColdBlock; // First block to be placed in the cold section BasicBlock* fgEntryBB; // For OSR, the original method's entry point BasicBlock* fgOSREntryBB; 
// For OSR, the logical entry point (~ patchpoint) #if defined(FEATURE_EH_FUNCLETS) BasicBlock* fgFirstFuncletBB; // First block of outlined funclets (to allow block insertion before the funclets) #endif BasicBlock* fgFirstBBScratch; // Block inserted for initialization stuff. Is nullptr if no such block has been // created. BasicBlockList* fgReturnBlocks; // list of BBJ_RETURN blocks unsigned fgEdgeCount; // # of control flow edges between the BBs unsigned fgBBcount; // # of BBs in the method #ifdef DEBUG unsigned fgBBcountAtCodegen; // # of BBs in the method at the start of codegen #endif unsigned fgBBNumMax; // The max bbNum that has been assigned to basic blocks unsigned fgDomBBcount; // # of BBs for which we have dominator and reachability information BasicBlock** fgBBInvPostOrder; // The flow graph stored in an array sorted in topological order, needed to compute // dominance. Indexed by block number. Size: fgBBNumMax + 1. // After the dominance tree is computed, we cache a DFS preorder number and DFS postorder number to compute // dominance queries in O(1). fgDomTreePreOrder and fgDomTreePostOrder are arrays giving the block's preorder and // postorder number, respectively. The arrays are indexed by basic block number. (Note that blocks are numbered // starting from one. Thus, we always waste element zero. This makes debugging easier and makes the code less likely // to suffer from bugs stemming from forgetting to add or subtract one from the block number to form an array // index). The arrays are of size fgBBNumMax + 1. unsigned* fgDomTreePreOrder; unsigned* fgDomTreePostOrder; // Dominator tree used by SSA construction and copy propagation (the two are expected to use the same tree // in order to avoid the need for SSA reconstruction and an "out of SSA" phase). DomTreeNode* fgSsaDomTree; bool fgBBVarSetsInited; // Allocate array like T* a = new T[fgBBNumMax + 1]; // Using helper so we don't keep forgetting +1. template <typename T> T* fgAllocateTypeForEachBlk(CompMemKind cmk = CMK_Unknown) { return getAllocator(cmk).allocate<T>(fgBBNumMax + 1); } // BlockSets are relative to a specific set of BasicBlock numbers. If that changes // (if the blocks are renumbered), this changes. BlockSets from different epochs // cannot be meaningfully combined. Note that new blocks can be created with higher // block numbers without changing the basic block epoch. These blocks *cannot* // participate in a block set until the blocks are all renumbered, causing the epoch // to change. This is useful if continuing to use previous block sets is valuable. // If the epoch is zero, then it is uninitialized, and block sets can't be used. unsigned fgCurBBEpoch; unsigned GetCurBasicBlockEpoch() { return fgCurBBEpoch; } // The number of basic blocks in the current epoch. When the blocks are renumbered, // this is fgBBcount. As blocks are added, fgBBcount increases, fgCurBBEpochSize remains // the same, until a new BasicBlock epoch is created, such as when the blocks are all renumbered. unsigned fgCurBBEpochSize; // The number of "size_t" elements required to hold a bitset large enough for fgCurBBEpochSize // bits. This is precomputed to avoid doing math every time BasicBlockBitSetTraits::GetArrSize() is called. unsigned fgBBSetCountInSizeTUnits; void NewBasicBlockEpoch() { INDEBUG(unsigned oldEpochArrSize = fgBBSetCountInSizeTUnits); // We have a new epoch. Compute and cache the size needed for new BlockSets. 
fgCurBBEpoch++; fgCurBBEpochSize = fgBBNumMax + 1; fgBBSetCountInSizeTUnits = roundUp(fgCurBBEpochSize, (unsigned)(sizeof(size_t) * 8)) / unsigned(sizeof(size_t) * 8); #ifdef DEBUG // All BlockSet objects are now invalid! fgReachabilitySetsValid = false; // the bbReach sets are now invalid! fgEnterBlksSetValid = false; // the fgEnterBlks set is now invalid! if (verbose) { unsigned epochArrSize = BasicBlockBitSetTraits::GetArrSize(this, sizeof(size_t)); printf("\nNew BlockSet epoch %d, # of blocks (including unused BB00): %u, bitset array size: %u (%s)", fgCurBBEpoch, fgCurBBEpochSize, epochArrSize, (epochArrSize <= 1) ? "short" : "long"); if ((fgCurBBEpoch != 1) && ((oldEpochArrSize <= 1) != (epochArrSize <= 1))) { // If we're not just establishing the first epoch, and the epoch array size has changed such that we're // going to change our bitset representation from short (just a size_t bitset) to long (a pointer to an // array of size_t bitsets), then print that out. printf("; NOTE: BlockSet size was previously %s!", (oldEpochArrSize <= 1) ? "short" : "long"); } printf("\n"); } #endif // DEBUG } void EnsureBasicBlockEpoch() { if (fgCurBBEpochSize != fgBBNumMax + 1) { NewBasicBlockEpoch(); } } BasicBlock* fgNewBasicBlock(BBjumpKinds jumpKind); void fgEnsureFirstBBisScratch(); bool fgFirstBBisScratch(); bool fgBBisScratch(BasicBlock* block); void fgExtendEHRegionBefore(BasicBlock* block); void fgExtendEHRegionAfter(BasicBlock* block); BasicBlock* fgNewBBbefore(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion); BasicBlock* fgNewBBafter(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion); BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind, unsigned tryIndex, unsigned hndIndex, BasicBlock* nearBlk, bool putInFilter = false, bool runRarely = false, bool insertAtEnd = false); BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind, BasicBlock* srcBlk, bool runRarely = false, bool insertAtEnd = false); BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind); BasicBlock* fgNewBBinRegionWorker(BBjumpKinds jumpKind, BasicBlock* afterBlk, unsigned xcptnIndex, bool putInTryRegion); void fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk); void fgInsertBBafter(BasicBlock* insertAfterBlk, BasicBlock* newBlk); void fgUnlinkBlock(BasicBlock* block); #ifdef FEATURE_JIT_METHOD_PERF unsigned fgMeasureIR(); #endif // FEATURE_JIT_METHOD_PERF bool fgModified; // True if the flow graph has been modified recently bool fgComputePredsDone; // Have we computed the bbPreds list bool fgCheapPredsValid; // Is the bbCheapPreds list valid? bool fgDomsComputed; // Have we computed the dominator sets? bool fgReturnBlocksComputed; // Have we computed the return blocks list? bool fgOptimizedFinally; // Did we optimize any try-finallys? bool fgHasSwitch; // any BBJ_SWITCH jumps? BlockSet fgEnterBlks; // Set of blocks which have a special transfer of control; the "entry" blocks plus EH handler // begin blocks. #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) BlockSet fgAlwaysBlks; // Set of blocks which are BBJ_ALWAYS part of BBJ_CALLFINALLY/BBJ_ALWAYS pair that should // never be removed due to a requirement to use the BBJ_ALWAYS for generating code and // not have "retless" blocks. #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #ifdef DEBUG bool fgReachabilitySetsValid; // Are the bbReach sets valid? bool fgEnterBlksSetValid; // Is the fgEnterBlks set valid? 
#endif // DEBUG bool fgRemoveRestOfBlock; // true if we know that we will throw bool fgStmtRemoved; // true if we remove statements -> need new DFA // There are two modes for ordering of the trees. // - In FGOrderTree, the dominant ordering is the tree order, and the nodes contained in // each tree and sub-tree are contiguous, and can be traversed (in gtNext/gtPrev order) // by traversing the tree according to the order of the operands. // - In FGOrderLinear, the dominant ordering is the linear order. enum FlowGraphOrder { FGOrderTree, FGOrderLinear }; FlowGraphOrder fgOrder; // The following are boolean flags that keep track of the state of internal data structures bool fgStmtListThreaded; // true if the node list is now threaded bool fgCanRelocateEHRegions; // true if we are allowed to relocate the EH regions bool fgEdgeWeightsComputed; // true after we have called fgComputeEdgeWeights bool fgHaveValidEdgeWeights; // true if we were successful in computing all of the edge weights bool fgSlopUsedInEdgeWeights; // true if there was some slop used when computing the edge weights bool fgRangeUsedInEdgeWeights; // true if some of the edgeWeight are expressed in Min..Max form bool fgNeedsUpdateFlowGraph; // true if we need to run fgUpdateFlowGraph weight_t fgCalledCount; // count of the number of times this method was called // This is derived from the profile data // or is BB_UNITY_WEIGHT when we don't have profile data #if defined(FEATURE_EH_FUNCLETS) bool fgFuncletsCreated; // true if the funclet creation phase has been run #endif // FEATURE_EH_FUNCLETS bool fgGlobalMorph; // indicates if we are in the global morphing phase // since fgMorphTree can be called from several places bool impBoxTempInUse; // the temp below is valid and available unsigned impBoxTemp; // a temporary that is used for boxing #ifdef DEBUG bool jitFallbackCompile; // Are we doing a fallback compile? That is, have we executed a NO_WAY assert, // and we are trying to compile again in a "safer", minopts mode? #endif #if defined(DEBUG) unsigned impInlinedCodeSize; bool fgPrintInlinedMethods; #endif jitstd::vector<flowList*>* fgPredListSortVector; //------------------------------------------------------------------------- void fgInit(); PhaseStatus fgImport(); PhaseStatus fgTransformIndirectCalls(); PhaseStatus fgTransformPatchpoints(); PhaseStatus fgInline(); PhaseStatus fgRemoveEmptyTry(); PhaseStatus fgRemoveEmptyFinally(); PhaseStatus fgMergeFinallyChains(); PhaseStatus fgCloneFinally(); void fgCleanupContinuation(BasicBlock* continuation); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) PhaseStatus fgUpdateFinallyTargetFlags(); void fgClearAllFinallyTargetBits(); void fgAddFinallyTargetFlags(); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) PhaseStatus fgTailMergeThrows(); void fgTailMergeThrowsFallThroughHelper(BasicBlock* predBlock, BasicBlock* nonCanonicalBlock, BasicBlock* canonicalBlock, flowList* predEdge); void fgTailMergeThrowsJumpToHelper(BasicBlock* predBlock, BasicBlock* nonCanonicalBlock, BasicBlock* canonicalBlock, flowList* predEdge); GenTree* fgCheckCallArgUpdate(GenTree* parent, GenTree* child, var_types origType); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Sometimes we need to defer updating the BBF_FINALLY_TARGET bit. fgNeedToAddFinallyTargetBits signals // when this is necessary.
bool fgNeedToAddFinallyTargetBits; #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) bool fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block, BasicBlock* handler, BlockToBlockMap& continuationMap); GenTree* fgGetCritSectOfStaticMethod(); #if defined(FEATURE_EH_FUNCLETS) void fgAddSyncMethodEnterExit(); GenTree* fgCreateMonitorTree(unsigned lvaMonitorBool, unsigned lvaThisVar, BasicBlock* block, bool enter); void fgConvertSyncReturnToLeave(BasicBlock* block); #endif // FEATURE_EH_FUNCLETS void fgAddReversePInvokeEnterExit(); bool fgMoreThanOneReturnBlock(); // The number of separate return points in the method. unsigned fgReturnCount; void fgAddInternal(); enum class FoldResult { FOLD_DID_NOTHING, FOLD_CHANGED_CONTROL_FLOW, FOLD_REMOVED_LAST_STMT, FOLD_ALTERED_LAST_STMT, }; FoldResult fgFoldConditional(BasicBlock* block); void fgMorphStmts(BasicBlock* block); void fgMorphBlocks(); void fgMergeBlockReturn(BasicBlock* block); bool fgMorphBlockStmt(BasicBlock* block, Statement* stmt DEBUGARG(const char* msg)); void fgSetOptions(); #ifdef DEBUG static fgWalkPreFn fgAssertNoQmark; void fgPreExpandQmarkChecks(GenTree* expr); void fgPostExpandQmarkChecks(); static void fgCheckQmarkAllowedForm(GenTree* tree); #endif IL_OFFSET fgFindBlockILOffset(BasicBlock* block); void fgFixEntryFlowForOSR(); BasicBlock* fgSplitBlockAtBeginning(BasicBlock* curr); BasicBlock* fgSplitBlockAtEnd(BasicBlock* curr); BasicBlock* fgSplitBlockAfterStatement(BasicBlock* curr, Statement* stmt); BasicBlock* fgSplitBlockAfterNode(BasicBlock* curr, GenTree* node); // for LIR BasicBlock* fgSplitEdge(BasicBlock* curr, BasicBlock* succ); Statement* fgNewStmtFromTree(GenTree* tree, BasicBlock* block, const DebugInfo& di); Statement* fgNewStmtFromTree(GenTree* tree); Statement* fgNewStmtFromTree(GenTree* tree, BasicBlock* block); Statement* fgNewStmtFromTree(GenTree* tree, const DebugInfo& di); GenTree* fgGetTopLevelQmark(GenTree* expr, GenTree** ppDst = nullptr); void fgExpandQmarkForCastInstOf(BasicBlock* block, Statement* stmt); void fgExpandQmarkStmt(BasicBlock* block, Statement* stmt); void fgExpandQmarkNodes(); // Do "simple lowering." This functionality is (conceptually) part of "general" // lowering that is distributed between fgMorph and the lowering phase of LSRA. 
void fgSimpleLowering(); GenTree* fgInitThisClass(); GenTreeCall* fgGetStaticsCCtorHelper(CORINFO_CLASS_HANDLE cls, CorInfoHelpFunc helper); GenTreeCall* fgGetSharedCCtor(CORINFO_CLASS_HANDLE cls); bool backendRequiresLocalVarLifetimes() { return !opts.MinOpts() || m_pLinearScan->willEnregisterLocalVars(); } void fgLocalVarLiveness(); void fgLocalVarLivenessInit(); void fgPerNodeLocalVarLiveness(GenTree* node); void fgPerBlockLocalVarLiveness(); VARSET_VALRET_TP fgGetHandlerLiveVars(BasicBlock* block); void fgLiveVarAnalysis(bool updateInternalOnly = false); void fgComputeLifeCall(VARSET_TP& life, GenTreeCall* call); void fgComputeLifeTrackedLocalUse(VARSET_TP& life, LclVarDsc& varDsc, GenTreeLclVarCommon* node); bool fgComputeLifeTrackedLocalDef(VARSET_TP& life, VARSET_VALARG_TP keepAliveVars, LclVarDsc& varDsc, GenTreeLclVarCommon* node); bool fgComputeLifeUntrackedLocal(VARSET_TP& life, VARSET_VALARG_TP keepAliveVars, LclVarDsc& varDsc, GenTreeLclVarCommon* lclVarNode); bool fgComputeLifeLocal(VARSET_TP& life, VARSET_VALARG_TP keepAliveVars, GenTree* lclVarNode); void fgComputeLife(VARSET_TP& life, GenTree* startNode, GenTree* endNode, VARSET_VALARG_TP volatileVars, bool* pStmtInfoDirty DEBUGARG(bool* treeModf)); void fgComputeLifeLIR(VARSET_TP& life, BasicBlock* block, VARSET_VALARG_TP volatileVars); bool fgTryRemoveNonLocal(GenTree* node, LIR::Range* blockRange); void fgRemoveDeadStoreLIR(GenTree* store, BasicBlock* block); bool fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_VALARG_TP life, bool* doAgain, bool* pStmtInfoDirty, bool* pStoreRemoved DEBUGARG(bool* treeModf)); void fgInterBlockLocalVarLiveness(); // Blocks: convenience methods for enabling range-based `for` iteration over the function's blocks, e.g.: // 1. for (BasicBlock* const block : compiler->Blocks()) ... // 2. for (BasicBlock* const block : compiler->Blocks(startBlock)) ... // 3. for (BasicBlock* const block : compiler->Blocks(startBlock, endBlock)) ... // In case (1), the block list can be empty. In case (2), `startBlock` can be nullptr. In case (3), // both `startBlock` and `endBlock` must be non-null. // BasicBlockSimpleList Blocks() const { return BasicBlockSimpleList(fgFirstBB); } BasicBlockSimpleList Blocks(BasicBlock* startBlock) const { return BasicBlockSimpleList(startBlock); } BasicBlockRangeList Blocks(BasicBlock* startBlock, BasicBlock* endBlock) const { return BasicBlockRangeList(startBlock, endBlock); } // The presence of a partial definition presents some difficulties for SSA: this is both a use of some SSA name // of "x", and a def of a new SSA name for "x". The tree only has one local variable for "x", so it has to choose // whether to treat that as the use or def. It chooses the "use", and thus the old SSA name. This map allows us // to record/recover the "def" SSA number, given the lcl var node for "x" in such a tree. typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, unsigned> NodeToUnsignedMap; NodeToUnsignedMap* m_opAsgnVarDefSsaNums; NodeToUnsignedMap* GetOpAsgnVarDefSsaNums() { if (m_opAsgnVarDefSsaNums == nullptr) { m_opAsgnVarDefSsaNums = new (getAllocator()) NodeToUnsignedMap(getAllocator()); } return m_opAsgnVarDefSsaNums; } // This map tracks nodes whose value numbers explicitly or implicitly depend on memory states. // The map provides the entry block of the most closely enclosing loop that // defines the memory region accessed when defining the nodes's VN. 
// // This information should be consulted when considering hoisting node out of a loop, as the VN // for the node will only be valid within the indicated loop. // // It is not fine-grained enough to track memory dependence within loops, so cannot be used // for more general code motion. // // If a node does not have an entry in the map we currently assume the VN is not memory dependent // and so memory does not constrain hoisting. // typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, BasicBlock*> NodeToLoopMemoryBlockMap; NodeToLoopMemoryBlockMap* m_nodeToLoopMemoryBlockMap; NodeToLoopMemoryBlockMap* GetNodeToLoopMemoryBlockMap() { if (m_nodeToLoopMemoryBlockMap == nullptr) { m_nodeToLoopMemoryBlockMap = new (getAllocator()) NodeToLoopMemoryBlockMap(getAllocator()); } return m_nodeToLoopMemoryBlockMap; } void optRecordLoopMemoryDependence(GenTree* tree, BasicBlock* block, ValueNum memoryVN); void optCopyLoopMemoryDependence(GenTree* fromTree, GenTree* toTree); // Requires value numbering phase to have completed. Returns the value number ("gtVN") of the // "tree," EXCEPT in the case of GTF_VAR_USEASG, because the tree node's gtVN member is the // "use" VN. Performs a lookup into the map of (use asg tree -> def VN.) to return the "def's" // VN. inline ValueNum GetUseAsgDefVNOrTreeVN(GenTree* tree); // Requires that "lcl" has the GTF_VAR_DEF flag set. Returns the SSA number of "lcl". // Except: assumes that lcl is a def, and if it is // a partial def (GTF_VAR_USEASG), looks up and returns the SSA number for the "def", // rather than the "use" SSA number recorded in the tree "lcl". inline unsigned GetSsaNumForLocalVarDef(GenTree* lcl); inline bool PreciseRefCountsRequired(); // Performs SSA conversion. void fgSsaBuild(); // Reset any data structures to the state expected by "fgSsaBuild", so it can be run again. void fgResetForSsa(); unsigned fgSsaPassesCompleted; // Number of times fgSsaBuild has been run. // Returns "true" if this is a special variable that is never zero initialized in the prolog. inline bool fgVarIsNeverZeroInitializedInProlog(unsigned varNum); // Returns "true" if the variable needs explicit zero initialization. inline bool fgVarNeedsExplicitZeroInit(unsigned varNum, bool bbInALoop, bool bbIsReturn); // The value numbers for this compilation. ValueNumStore* vnStore; public: ValueNumStore* GetValueNumStore() { return vnStore; } // Do value numbering (assign a value number to each // tree node). void fgValueNumber(); // Computes new GcHeap VN via the assignment H[elemTypeEq][arrVN][inx][fldSeq] = rhsVN. // Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type. // The 'indType' is the indirection type of the lhs of the assignment and will typically // match the element type of the array or fldSeq. When this type doesn't match // or if the fldSeq is 'NotAField' we invalidate the array contents H[elemTypeEq][arrVN] // ValueNum fgValueNumberArrIndexAssign(CORINFO_CLASS_HANDLE elemTypeEq, ValueNum arrVN, ValueNum inxVN, FieldSeqNode* fldSeq, ValueNum rhsVN, var_types indType); // Requires that "tree" is a GT_IND marked as an array index, and that its address argument // has been parsed to yield the other input arguments. If evaluation of the address // can raise exceptions, those should be captured in the exception set "addrXvnp". // Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type. // Marks "tree" with the VN for H[elemTypeEq][arrVN][inx][fldSeq] (for the liberal VN; a new unique // VN for the conservative VN.) 
Also marks the tree's argument as the address of an array element. // The type tree->TypeGet() will typically match the element type of the array or fldSeq. // When this type doesn't match or if the fldSeq is 'NotAField' we return a new unique VN // ValueNum fgValueNumberArrIndexVal(GenTree* tree, CORINFO_CLASS_HANDLE elemTypeEq, ValueNum arrVN, ValueNum inxVN, ValueNumPair addrXvnp, FieldSeqNode* fldSeq); // Requires "funcApp" to be a VNF_PtrToArrElem, and "addrXvnp" to represent the exception set thrown // by evaluating the array index expression "tree". Returns the value number resulting from // dereferencing the array in the current GcHeap state. If "tree" is non-null, it must be the // "GT_IND" that does the dereference, and it is given the returned value number. ValueNum fgValueNumberArrIndexVal(GenTree* tree, VNFuncApp* funcApp, ValueNumPair addrXvnp); // Compute the value number for a byref-exposed load of the given type via the given pointerVN. ValueNum fgValueNumberByrefExposedLoad(var_types type, ValueNum pointerVN); unsigned fgVNPassesCompleted; // Number of times fgValueNumber has been run. // Utility functions for fgValueNumber. // Perform value-numbering for the trees in "blk". void fgValueNumberBlock(BasicBlock* blk); // Requires that "entryBlock" is the entry block of loop "loopNum", and that "loopNum" is the // innermost loop of which "entryBlock" is the entry. Returns the value number that should be // assumed for the memoryKind at the start "entryBlk". ValueNum fgMemoryVNForLoopSideEffects(MemoryKind memoryKind, BasicBlock* entryBlock, unsigned loopNum); // Called when an operation (performed by "tree", described by "msg") may cause the GcHeap to be mutated. // As GcHeap is a subset of ByrefExposed, this will also annotate the ByrefExposed mutation. void fgMutateGcHeap(GenTree* tree DEBUGARG(const char* msg)); // Called when an operation (performed by "tree", described by "msg") may cause an address-exposed local to be // mutated. void fgMutateAddressExposedLocal(GenTree* tree DEBUGARG(const char* msg)); // For a GC heap store at curTree, record the new curMemoryVN's and update curTree's MemorySsaMap. // As GcHeap is a subset of ByrefExposed, this will also record the ByrefExposed store. void recordGcHeapStore(GenTree* curTree, ValueNum gcHeapVN DEBUGARG(const char* msg)); // For a store to an address-exposed local at curTree, record the new curMemoryVN and update curTree's MemorySsaMap. void recordAddressExposedLocalStore(GenTree* curTree, ValueNum memoryVN DEBUGARG(const char* msg)); void fgSetCurrentMemoryVN(MemoryKind memoryKind, ValueNum newMemoryVN); // Tree caused an update in the current memory VN. If "tree" has an associated heap SSA #, record that // value in that SSA #. void fgValueNumberRecordMemorySsa(MemoryKind memoryKind, GenTree* tree); // The input 'tree' is a leaf node that is a constant // Assign the proper value number to the tree void fgValueNumberTreeConst(GenTree* tree); // If the VN store has been initialized, reassign the // proper value number to the constant tree. void fgUpdateConstTreeValueNumber(GenTree* tree); // Assumes that all inputs to "tree" have had value numbers assigned; assigns a VN to tree. // (With some exceptions: the VN of the lhs of an assignment is assigned as part of the // assignment.) void fgValueNumberTree(GenTree* tree); void fgValueNumberAssignment(GenTreeOp* tree); // Does value-numbering for a block assignment. 
void fgValueNumberBlockAssignment(GenTree* tree); bool fgValueNumberBlockAssignmentTypeCheck(LclVarDsc* dstVarDsc, FieldSeqNode* dstFldSeq, GenTree* src); // Does value-numbering for a cast tree. void fgValueNumberCastTree(GenTree* tree); // Does value-numbering for an intrinsic tree. void fgValueNumberIntrinsic(GenTree* tree); #ifdef FEATURE_SIMD // Does value-numbering for a GT_SIMD tree void fgValueNumberSimd(GenTreeSIMD* tree); #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS // Does value-numbering for a GT_HWINTRINSIC tree void fgValueNumberHWIntrinsic(GenTreeHWIntrinsic* tree); #endif // FEATURE_HW_INTRINSICS // Does value-numbering for a call. We interpret some helper calls. void fgValueNumberCall(GenTreeCall* call); // Does value-numbering for a helper representing a cast operation. void fgValueNumberCastHelper(GenTreeCall* call); // Does value-numbering for a helper "call" that has a VN function symbol "vnf". void fgValueNumberHelperCallFunc(GenTreeCall* call, VNFunc vnf, ValueNumPair vnpExc); // Requires "helpCall" to be a helper call. Assigns it a value number; // we understand the semantics of some of the calls. Returns "true" if // the call may modify the heap (we assume arbitrary memory side effects if so). bool fgValueNumberHelperCall(GenTreeCall* helpCall); // Requires that "helpFunc" is one of the pure Jit Helper methods. // Returns the corresponding VNFunc to use for value numbering VNFunc fgValueNumberJitHelperMethodVNFunc(CorInfoHelpFunc helpFunc); // Adds the exception set for the current tree node which has a memory indirection operation void fgValueNumberAddExceptionSetForIndirection(GenTree* tree, GenTree* baseAddr); // Adds the exception sets for the current tree node which is performing a division or modulus operation void fgValueNumberAddExceptionSetForDivision(GenTree* tree); // Adds the exception set for the current tree node which is performing a overflow checking operation void fgValueNumberAddExceptionSetForOverflow(GenTree* tree); // Adds the exception set for the current tree node which is performing a bounds check operation void fgValueNumberAddExceptionSetForBoundsCheck(GenTree* tree); // Adds the exception set for the current tree node which is performing a ckfinite operation void fgValueNumberAddExceptionSetForCkFinite(GenTree* tree); // Adds the exception sets for the current tree node void fgValueNumberAddExceptionSet(GenTree* tree); #ifdef DEBUG void fgDebugCheckExceptionSets(); void fgDebugCheckValueNumberedTree(GenTree* tree); #endif // These are the current value number for the memory implicit variables while // doing value numbering. These are the value numbers under the "liberal" interpretation // of memory values; the "conservative" interpretation needs no VN, since every access of // memory yields an unknown value. ValueNum fgCurMemoryVN[MemoryKindCount]; // Return a "pseudo"-class handle for an array element type. If "elemType" is TYP_STRUCT, // requires "elemStructType" to be non-null (and to have a low-order zero). Otherwise, low order bit // is 1, and the rest is an encoding of "elemTyp". static CORINFO_CLASS_HANDLE EncodeElemType(var_types elemTyp, CORINFO_CLASS_HANDLE elemStructType) { if (elemStructType != nullptr) { assert(varTypeIsStruct(elemTyp) || elemTyp == TYP_REF || elemTyp == TYP_BYREF || varTypeIsIntegral(elemTyp)); assert((size_t(elemStructType) & 0x1) == 0x0); // Make sure the encoding below is valid. 
return elemStructType; } else { assert(elemTyp != TYP_STRUCT); elemTyp = varTypeToSigned(elemTyp); return CORINFO_CLASS_HANDLE(size_t(elemTyp) << 1 | 0x1); } } // If "clsHnd" is the result of an "EncodeElemType" call for a primitive element type, returns the // var_types it represents. Otherwise, returns TYP_STRUCT (on the assumption that "clsHnd" is // the struct type of the element). static var_types DecodeElemType(CORINFO_CLASS_HANDLE clsHnd) { size_t clsHndVal = size_t(clsHnd); if (clsHndVal & 0x1) { return var_types(clsHndVal >> 1); } else { return TYP_STRUCT; } } // Convert a BYTE which represents the VM's CorInfoGCtype to the JIT's var_types var_types getJitGCType(BYTE gcType); // Returns true if the provided type should be treated as a primitive type // for the unmanaged calling conventions. bool isNativePrimitiveStructType(CORINFO_CLASS_HANDLE clsHnd); enum structPassingKind { SPK_Unknown, // Invalid value, never returned SPK_PrimitiveType, // The struct is passed/returned using a primitive type. SPK_EnclosingType, // Like SPK_PrimitiveType, but used for return types that // require a primitive type temp that is larger than the struct size. // Currently used for structs of size 3, 5, 6, or 7 bytes. SPK_ByValue, // The struct is passed/returned by value (using the ABI rules) // for ARM64 and UNIX_X64 in multiple registers. (when all of the // parameters registers are used, then the stack will be used) // for X86 passed on the stack, for ARM32 passed in registers // or the stack or split between registers and the stack. SPK_ByValueAsHfa, // The struct is passed/returned as an HFA in multiple registers. SPK_ByReference }; // The struct is passed/returned by reference to a copy/buffer. // Get the "primitive" type that is used when we are given a struct of size 'structSize'. // For pointer sized structs the 'clsHnd' is used to determine if the struct contains GC ref. // A "primitive" type is one of the scalar types: byte, short, int, long, ref, float, double // If we can't or shouldn't use a "primitive" type then TYP_UNKNOWN is returned. // // isVarArg is passed for use on Windows Arm64 to change the decision returned regarding // hfa types. // var_types getPrimitiveTypeForStruct(unsigned structSize, CORINFO_CLASS_HANDLE clsHnd, bool isVarArg); // Get the type that is used to pass values of the given struct type. // isVarArg is passed for use on Windows Arm64 to change the decision returned regarding // hfa types. // var_types getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, structPassingKind* wbPassStruct, bool isVarArg, unsigned structSize); // Get the type that is used to return values of the given struct type. // If the size is unknown, pass 0 and it will be determined from 'clsHnd'. var_types getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, CorInfoCallConvExtension callConv, structPassingKind* wbPassStruct = nullptr, unsigned structSize = 0); #ifdef DEBUG // Print a representation of "vnp" or "vn" on standard output. // If "level" is non-zero, we also print out a partial expansion of the value. void vnpPrint(ValueNumPair vnp, unsigned level); void vnPrint(ValueNum vn, unsigned level); #endif bool fgDominate(BasicBlock* b1, BasicBlock* b2); // Return true if b1 dominates b2 // Dominator computation member functions // Not exposed outside Compiler protected: bool fgReachable(BasicBlock* b1, BasicBlock* b2); // Returns true if block b1 can reach block b2 // Compute immediate dominators, the dominator tree and its pre/post-order traversal numbers.
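// Illustrative sketch (hypothetical use of the members declared nearby): once fgComputeDoms() and
// fgNumberDomTree() have run, a dominance query is cheap, e.g.
//   if (fgDominate(loopHead, block))
//   {
//       // every path from the method entry to "block" passes through "loopHead"
//   }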
void fgComputeDoms();
void fgCompDominatedByExceptionalEntryBlocks();
BlockSet_ValRet_T fgGetDominatorSet(BasicBlock* block); // Returns a set of blocks that dominate the given block.
// Note: this is relatively slow compared to calling fgDominate(),
// especially if dealing with a single block-versus-block check.
void fgComputeReachabilitySets(); // Compute bbReach sets. (Also sets BBF_GC_SAFE_POINT flag on blocks.)
void fgComputeReturnBlocks(); // Initialize fgReturnBlocks to a list of BBJ_RETURN blocks.
void fgComputeEnterBlocksSet(); // Compute the set of entry blocks, 'fgEnterBlks'.
bool fgRemoveUnreachableBlocks(); // Remove blocks determined to be unreachable by the bbReach sets.
void fgComputeReachability(); // Perform flow graph node reachability analysis.
BasicBlock* fgIntersectDom(BasicBlock* a, BasicBlock* b); // Intersect two immediate dominator sets.
void fgDfsInvPostOrder(); // In order to compute dominance using fgIntersectDom, the flow graph nodes must be
                          // processed in topological order; this function takes care of that.
void fgDfsInvPostOrderHelper(BasicBlock* block, BlockSet& visited, unsigned* count);
BlockSet_ValRet_T fgDomFindStartNodes(); // Computes which basic blocks don't have incoming edges in the flow graph.
                                         // Returns this as a set.
INDEBUG(void fgDispDomTree(DomTreeNode* domTree);) // Helper that prints out the Dominator Tree in debug builds.
DomTreeNode* fgBuildDomTree(); // Once we compute all the immediate dominator sets for each node in the flow graph
                               // (performed by fgComputeDoms), this procedure builds the dominance tree, represented
                               // using adjacency lists.
// In order to speed up queries of the form 'Does A dominate B', we can perform a DFS preorder and postorder
// traversal of the dominance tree; the dominance query then becomes: A dominates B iff preOrder(A) <= preOrder(B)
// && postOrder(A) >= postOrder(B), making the computation O(1).
void fgNumberDomTree(DomTreeNode* domTree);
// When the flow graph changes, we need to update the block numbers, predecessor lists, reachability sets,
// dominators, and possibly loops.
void fgUpdateChangedFlowGraph(const bool computePreds        = true,
                              const bool computeDoms         = true,
                              const bool computeReturnBlocks = false,
                              const bool computeLoops        = false);
public:
// Compute the predecessors of the blocks in the control flow graph.
void fgComputePreds();
// Remove all predecessor information.
void fgRemovePreds();
// Compute the cheap flow graph predecessor lists. This is used in some early phases
// before the full predecessor lists are computed.
void fgComputeCheapPreds();
private:
void fgAddCheapPred(BasicBlock* block, BasicBlock* blockPred);
void fgRemoveCheapPred(BasicBlock* block, BasicBlock* blockPred);
public:
enum GCPollType
{
    GCPOLL_NONE,
    GCPOLL_CALL,
    GCPOLL_INLINE
};
// Initialize the per-block variable sets (used for liveness analysis).
void fgInitBlockVarSets();
PhaseStatus fgInsertGCPolls();
BasicBlock* fgCreateGCPoll(GCPollType pollType, BasicBlock* block);
// Requires that "block" is a block that returns from
// a finally. Returns the number of successors (jump targets of
// blocks in the covered "try" that did a "LEAVE".)
unsigned fgNSuccsOfFinallyRet(BasicBlock* block);
// Requires that "block" is a block that returns (in the sense of BBJ_EHFINALLYRET) from
// a finally. Returns its "i"th successor (jump targets of
// blocks in the covered "try" that did a "LEAVE".)
// Requires that "i" < fgNSuccsOfFinallyRet(block).
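// (Illustrative usage of fgNSuccsOfFinallyRet above and fgSuccOfFinallyRet below, for a BBJ_EHFINALLYRET
// block 'blk':
//   for (unsigned i = 0; i < fgNSuccsOfFinallyRet(blk); i++)
//   {
//       BasicBlock* succ = fgSuccOfFinallyRet(blk, i); /* i-th distinct continuation after the finally */
//   }
// )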
BasicBlock* fgSuccOfFinallyRet(BasicBlock* block, unsigned i); private: // Factor out common portions of the impls of the methods above. void fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock** bres, unsigned* nres); public: // For many purposes, it is desirable to be able to enumerate the *distinct* targets of a switch statement, // skipping duplicate targets. (E.g., in flow analyses that are only interested in the set of possible targets.) // SwitchUniqueSuccSet contains the non-duplicated switch targets. // (Code that modifies the jump table of a switch has an obligation to call Compiler::UpdateSwitchTableTarget, // which in turn will call the "UpdateTarget" method of this type if a SwitchUniqueSuccSet has already // been computed for the switch block. If a switch block is deleted or is transformed into a non-switch, // we leave the entry associated with the block, but it will no longer be accessed.) struct SwitchUniqueSuccSet { unsigned numDistinctSuccs; // Number of distinct targets of the switch. BasicBlock** nonDuplicates; // Array of "numDistinctSuccs", containing all the distinct switch target // successors. // The switch block "switchBlk" just had an entry with value "from" modified to the value "to". // Update "this" as necessary: if "from" is no longer an element of the jump table of "switchBlk", // remove it from "this", and ensure that "to" is a member. Use "alloc" to do any required allocation. void UpdateTarget(CompAllocator alloc, BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to); }; typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, SwitchUniqueSuccSet> BlockToSwitchDescMap; private: // Maps BasicBlock*'s that end in switch statements to SwitchUniqueSuccSets that allow // iteration over only the distinct successors. BlockToSwitchDescMap* m_switchDescMap; public: BlockToSwitchDescMap* GetSwitchDescMap(bool createIfNull = true) { if ((m_switchDescMap == nullptr) && createIfNull) { m_switchDescMap = new (getAllocator()) BlockToSwitchDescMap(getAllocator()); } return m_switchDescMap; } // Invalidate the map of unique switch block successors. For example, since the hash key of the map // depends on block numbers, we must invalidate the map when the blocks are renumbered, to ensure that // we don't accidentally look up and return the wrong switch data. void InvalidateUniqueSwitchSuccMap() { m_switchDescMap = nullptr; } // Requires "switchBlock" to be a block that ends in a switch. Returns // the corresponding SwitchUniqueSuccSet. SwitchUniqueSuccSet GetDescriptorForSwitch(BasicBlock* switchBlk); // The switch block "switchBlk" just had an entry with value "from" modified to the value "to". // Update "this" as necessary: if "from" is no longer an element of the jump table of "switchBlk", // remove it from "this", and ensure that "to" is a member. void UpdateSwitchTableTarget(BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to); // Remove the "SwitchUniqueSuccSet" of "switchBlk" in the BlockToSwitchDescMap. 
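// (Illustrative usage: a transformation that redirects a single switch edge would typically call
// UpdateSwitchTableTarget(switchBlk, from, to) so that any cached SwitchUniqueSuccSet stays in sync;
// the method below simply drops the cached set for one switch block.)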
void fgInvalidateSwitchDescMapEntry(BasicBlock* switchBlk); BasicBlock* fgFirstBlockOfHandler(BasicBlock* block); bool fgIsFirstBlockOfFilterOrHandler(BasicBlock* block); flowList* fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred); flowList* fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred, flowList*** ptrToPred); flowList* fgRemoveRefPred(BasicBlock* block, BasicBlock* blockPred); flowList* fgRemoveAllRefPreds(BasicBlock* block, BasicBlock* blockPred); void fgRemoveBlockAsPred(BasicBlock* block); void fgChangeSwitchBlock(BasicBlock* oldSwitchBlock, BasicBlock* newSwitchBlock); void fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch, BasicBlock* newTarget, BasicBlock* oldTarget); void fgReplaceJumpTarget(BasicBlock* block, BasicBlock* newTarget, BasicBlock* oldTarget); void fgReplacePred(BasicBlock* block, BasicBlock* oldPred, BasicBlock* newPred); flowList* fgAddRefPred(BasicBlock* block, BasicBlock* blockPred, flowList* oldEdge = nullptr, bool initializingPreds = false); // Only set to 'true' when we are computing preds in // fgComputePreds() void fgFindBasicBlocks(); bool fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt); bool fgCheckEHCanInsertAfterBlock(BasicBlock* blk, unsigned regionIndex, bool putInTryRegion); BasicBlock* fgFindInsertPoint(unsigned regionIndex, bool putInTryRegion, BasicBlock* startBlk, BasicBlock* endBlk, BasicBlock* nearBlk, BasicBlock* jumpBlk, bool runRarely); unsigned fgGetNestingLevel(BasicBlock* block, unsigned* pFinallyNesting = nullptr); void fgPostImportationCleanup(); void fgRemoveStmt(BasicBlock* block, Statement* stmt DEBUGARG(bool isUnlink = false)); void fgUnlinkStmt(BasicBlock* block, Statement* stmt); bool fgCheckRemoveStmt(BasicBlock* block, Statement* stmt); void fgCreateLoopPreHeader(unsigned lnum); void fgUnreachableBlock(BasicBlock* block); void fgRemoveConditionalJump(BasicBlock* block); BasicBlock* fgLastBBInMainFunction(); BasicBlock* fgEndBBAfterMainFunction(); void fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd); void fgRemoveBlock(BasicBlock* block, bool unreachable); bool fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext); void fgCompactBlocks(BasicBlock* block, BasicBlock* bNext); void fgUpdateLoopsAfterCompacting(BasicBlock* block, BasicBlock* bNext); BasicBlock* fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst); bool fgRenumberBlocks(); bool fgExpandRarelyRunBlocks(); bool fgEhAllowsMoveBlock(BasicBlock* bBefore, BasicBlock* bAfter); void fgMoveBlocksAfter(BasicBlock* bStart, BasicBlock* bEnd, BasicBlock* insertAfterBlk); enum FG_RELOCATE_TYPE { FG_RELOCATE_TRY, // relocate the 'try' region FG_RELOCATE_HANDLER // relocate the handler region (including the filter if necessary) }; BasicBlock* fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE relocateType); #if defined(FEATURE_EH_FUNCLETS) #if defined(TARGET_ARM) void fgClearFinallyTargetBit(BasicBlock* block); #endif // defined(TARGET_ARM) bool fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block); bool fgAnyIntraHandlerPreds(BasicBlock* block); void fgInsertFuncletPrologBlock(BasicBlock* block); void fgCreateFuncletPrologBlocks(); void fgCreateFunclets(); #else // !FEATURE_EH_FUNCLETS bool fgRelocateEHRegions(); #endif // !FEATURE_EH_FUNCLETS bool fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* target); bool fgBlockEndFavorsTailDuplication(BasicBlock* block, unsigned lclNum); bool fgBlockIsGoodTailDuplicationCandidate(BasicBlock* block, unsigned* lclNum); bool fgOptimizeEmptyBlock(BasicBlock* block); bool 
fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBlock* bDest); bool fgOptimizeBranch(BasicBlock* bJump); bool fgOptimizeSwitchBranches(BasicBlock* block); bool fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, BasicBlock* bPrev); bool fgOptimizeSwitchJumps(); #ifdef DEBUG void fgPrintEdgeWeights(); #endif void fgComputeBlockAndEdgeWeights(); weight_t fgComputeMissingBlockWeights(); void fgComputeCalledCount(weight_t returnWeight); void fgComputeEdgeWeights(); bool fgReorderBlocks(); PhaseStatus fgDetermineFirstColdBlock(); bool fgIsForwardBranch(BasicBlock* bJump, BasicBlock* bSrc = nullptr); bool fgUpdateFlowGraph(bool doTailDup = false); void fgFindOperOrder(); // method that returns if you should split here typedef bool(fgSplitPredicate)(GenTree* tree, GenTree* parent, fgWalkData* data); void fgSetBlockOrder(); void fgRemoveReturnBlock(BasicBlock* block); /* Helper code that has been factored out */ inline void fgConvertBBToThrowBB(BasicBlock* block); bool fgCastNeeded(GenTree* tree, var_types toType); GenTree* fgDoNormalizeOnStore(GenTree* tree); GenTree* fgMakeTmpArgNode(fgArgTabEntry* curArgTabEntry); // The following check for loops that don't execute calls bool fgLoopCallMarked; void fgLoopCallTest(BasicBlock* srcBB, BasicBlock* dstBB); void fgLoopCallMark(); void fgMarkLoopHead(BasicBlock* block); unsigned fgGetCodeEstimate(BasicBlock* block); #if DUMP_FLOWGRAPHS enum class PhasePosition { PrePhase, PostPhase }; const char* fgProcessEscapes(const char* nameIn, escapeMapping_t* map); static void fgDumpTree(FILE* fgxFile, GenTree* const tree); FILE* fgOpenFlowGraphFile(bool* wbDontClose, Phases phase, PhasePosition pos, LPCWSTR type); bool fgDumpFlowGraph(Phases phase, PhasePosition pos); #endif // DUMP_FLOWGRAPHS #ifdef DEBUG void fgDispDoms(); void fgDispReach(); void fgDispBBLiveness(BasicBlock* block); void fgDispBBLiveness(); void fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth = 0); void fgDispBasicBlocks(BasicBlock* firstBlock, BasicBlock* lastBlock, bool dumpTrees); void fgDispBasicBlocks(bool dumpTrees = false); void fgDumpStmtTree(Statement* stmt, unsigned bbNum); void fgDumpBlock(BasicBlock* block); void fgDumpTrees(BasicBlock* firstBlock, BasicBlock* lastBlock); static fgWalkPreFn fgStress64RsltMulCB; void fgStress64RsltMul(); void fgDebugCheckUpdate(); void fgDebugCheckBBNumIncreasing(); void fgDebugCheckBBlist(bool checkBBNum = false, bool checkBBRefs = true); void fgDebugCheckBlockLinks(); void fgDebugCheckLinks(bool morphTrees = false); void fgDebugCheckStmtsList(BasicBlock* block, bool morphTrees); void fgDebugCheckNodeLinks(BasicBlock* block, Statement* stmt); void fgDebugCheckNodesUniqueness(); void fgDebugCheckLoopTable(); void fgDebugCheckFlags(GenTree* tree); void fgDebugCheckDispFlags(GenTree* tree, GenTreeFlags dispFlags, GenTreeDebugFlags debugFlags); void fgDebugCheckFlagsHelper(GenTree* tree, GenTreeFlags actualFlags, GenTreeFlags expectedFlags); void fgDebugCheckTryFinallyExits(); void fgDebugCheckProfileData(); bool fgDebugCheckIncomingProfileData(BasicBlock* block); bool fgDebugCheckOutgoingProfileData(BasicBlock* block); #endif // DEBUG static bool fgProfileWeightsEqual(weight_t weight1, weight_t weight2); static bool fgProfileWeightsConsistent(weight_t weight1, weight_t weight2); static GenTree* fgGetFirstNode(GenTree* tree); //--------------------- Walking the trees in the IR ----------------------- struct fgWalkData { Compiler* compiler; fgWalkPreFn* wtprVisitorFn; fgWalkPostFn* wtpoVisitorFn; void* pCallbackData; // 
user-provided data GenTree* parent; // parent of current node, provided to callback GenTreeStack* parentStack; // stack of parent nodes, if asked for bool wtprLclsOnly; // whether to only visit lclvar nodes #ifdef DEBUG bool printModified; // callback can use this #endif }; fgWalkResult fgWalkTreePre(GenTree** pTree, fgWalkPreFn* visitor, void* pCallBackData = nullptr, bool lclVarsOnly = false, bool computeStack = false); fgWalkResult fgWalkTree(GenTree** pTree, fgWalkPreFn* preVisitor, fgWalkPostFn* postVisitor, void* pCallBackData = nullptr); void fgWalkAllTreesPre(fgWalkPreFn* visitor, void* pCallBackData); //----- Postorder fgWalkResult fgWalkTreePost(GenTree** pTree, fgWalkPostFn* visitor, void* pCallBackData = nullptr, bool computeStack = false); // An fgWalkPreFn that looks for expressions that have inline throws in // minopts mode. Basically it looks for tress with gtOverflowEx() or // GTF_IND_RNGCHK. It returns WALK_ABORT if one is found. It // returns WALK_SKIP_SUBTREES if GTF_EXCEPT is not set (assumes flags // properly propagated to parent trees). It returns WALK_CONTINUE // otherwise. static fgWalkResult fgChkThrowCB(GenTree** pTree, Compiler::fgWalkData* data); static fgWalkResult fgChkLocAllocCB(GenTree** pTree, Compiler::fgWalkData* data); static fgWalkResult fgChkQmarkCB(GenTree** pTree, Compiler::fgWalkData* data); /************************************************************************** * PROTECTED *************************************************************************/ protected: friend class SsaBuilder; friend struct ValueNumberState; //--------------------- Detect the basic blocks --------------------------- BasicBlock** fgBBs; // Table of pointers to the BBs void fgInitBBLookup(); BasicBlock* fgLookupBB(unsigned addr); bool fgCanSwitchToOptimized(); void fgSwitchToOptimized(const char* reason); bool fgMayExplicitTailCall(); void fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget); void fgMarkBackwardJump(BasicBlock* startBlock, BasicBlock* endBlock); void fgLinkBasicBlocks(); unsigned fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget); void fgCheckBasicBlockControlFlow(); void fgControlFlowPermitted(BasicBlock* blkSrc, BasicBlock* blkDest, bool IsLeave = false /* is the src a leave block */); bool fgFlowToFirstBlockOfInnerTry(BasicBlock* blkSrc, BasicBlock* blkDest, bool sibling); void fgObserveInlineConstants(OPCODE opcode, const FgStack& stack, bool isInlining); void fgAdjustForAddressExposedOrWrittenThis(); unsigned fgStressBBProf() { #ifdef DEBUG unsigned result = JitConfig.JitStressBBProf(); if (result == 0) { if (compStressCompile(STRESS_BB_PROFILE, 15)) { result = 1; } } return result; #else return 0; #endif } bool fgHaveProfileData(); bool fgGetProfileWeightForBasicBlock(IL_OFFSET offset, weight_t* weight); Instrumentor* fgCountInstrumentor; Instrumentor* fgClassInstrumentor; PhaseStatus fgPrepareToInstrumentMethod(); PhaseStatus fgInstrumentMethod(); PhaseStatus fgIncorporateProfileData(); void fgIncorporateBlockCounts(); void fgIncorporateEdgeCounts(); CORINFO_CLASS_HANDLE getRandomClass(ICorJitInfo::PgoInstrumentationSchema* schema, UINT32 countSchemaItems, BYTE* pInstrumentationData, int32_t ilOffset, CLRRandom* random); public: const char* fgPgoFailReason; bool fgPgoDisabled; ICorJitInfo::PgoSource fgPgoSource; ICorJitInfo::PgoInstrumentationSchema* fgPgoSchema; BYTE* fgPgoData; UINT32 fgPgoSchemaCount; HRESULT fgPgoQueryResult; UINT32 fgNumProfileRuns; UINT32 fgPgoBlockCounts; 
UINT32 fgPgoEdgeCounts; UINT32 fgPgoClassProfiles; unsigned fgPgoInlineePgo; unsigned fgPgoInlineeNoPgo; unsigned fgPgoInlineeNoPgoSingleBlock; void WalkSpanningTree(SpanningTreeVisitor* visitor); void fgSetProfileWeight(BasicBlock* block, weight_t weight); void fgApplyProfileScale(); bool fgHaveSufficientProfileData(); bool fgHaveTrustedProfileData(); // fgIsUsingProfileWeights - returns true if we have real profile data for this method // or if we have some fake profile data for the stress mode bool fgIsUsingProfileWeights() { return (fgHaveProfileData() || fgStressBBProf()); } // fgProfileRunsCount - returns total number of scenario runs for the profile data // or BB_UNITY_WEIGHT_UNSIGNED when we aren't using profile data. unsigned fgProfileRunsCount() { return fgIsUsingProfileWeights() ? fgNumProfileRuns : BB_UNITY_WEIGHT_UNSIGNED; } //-------- Insert a statement at the start or end of a basic block -------- #ifdef DEBUG public: static bool fgBlockContainsStatementBounded(BasicBlock* block, Statement* stmt, bool answerOnBoundExceeded = true); #endif public: Statement* fgNewStmtAtBeg(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo()); void fgInsertStmtAtEnd(BasicBlock* block, Statement* stmt); Statement* fgNewStmtAtEnd(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo()); Statement* fgNewStmtNearEnd(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo()); private: void fgInsertStmtNearEnd(BasicBlock* block, Statement* stmt); void fgInsertStmtAtBeg(BasicBlock* block, Statement* stmt); void fgInsertStmtAfter(BasicBlock* block, Statement* insertionPoint, Statement* stmt); public: void fgInsertStmtBefore(BasicBlock* block, Statement* insertionPoint, Statement* stmt); private: Statement* fgInsertStmtListAfter(BasicBlock* block, Statement* stmtAfter, Statement* stmtList); // Create a new temporary variable to hold the result of *ppTree, // and transform the graph accordingly. GenTree* fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDLE structType = nullptr); GenTree* fgMakeMultiUse(GenTree** ppTree); private: // Recognize a bitwise rotation pattern and convert into a GT_ROL or a GT_ROR node. GenTree* fgRecognizeAndMorphBitwiseRotation(GenTree* tree); bool fgOperIsBitwiseRotationRoot(genTreeOps oper); #if !defined(TARGET_64BIT) // Recognize and morph a long multiplication with 32 bit operands. GenTreeOp* fgRecognizeAndMorphLongMul(GenTreeOp* mul); GenTreeOp* fgMorphLongMul(GenTreeOp* mul); #endif //-------- Determine the order in which the trees will be evaluated ------- unsigned fgTreeSeqNum; GenTree* fgTreeSeqLst; GenTree* fgTreeSeqBeg; GenTree* fgSetTreeSeq(GenTree* tree, GenTree* prev = nullptr, bool isLIR = false); void fgSetTreeSeqHelper(GenTree* tree, bool isLIR); void fgSetTreeSeqFinish(GenTree* tree, bool isLIR); void fgSetStmtSeq(Statement* stmt); void fgSetBlockOrder(BasicBlock* block); //------------------------- Morphing -------------------------------------- unsigned fgPtrArgCntMax; public: //------------------------------------------------------------------------ // fgGetPtrArgCntMax: Return the maximum number of pointer-sized stack arguments that calls inside this method // can push on the stack. This value is calculated during morph. // // Return Value: // Returns fgPtrArgCntMax, that is a private field. 
// unsigned fgGetPtrArgCntMax() const { return fgPtrArgCntMax; } //------------------------------------------------------------------------ // fgSetPtrArgCntMax: Set the maximum number of pointer-sized stack arguments that calls inside this method // can push on the stack. This function is used during StackLevelSetter to fix incorrect morph calculations. // void fgSetPtrArgCntMax(unsigned argCntMax) { fgPtrArgCntMax = argCntMax; } bool compCanEncodePtrArgCntMax(); private: hashBv* fgOutgoingArgTemps; hashBv* fgCurrentlyInUseArgTemps; void fgSetRngChkTarget(GenTree* tree, bool delay = true); BasicBlock* fgSetRngChkTargetInner(SpecialCodeKind kind, bool delay); #if REARRANGE_ADDS void fgMoveOpsLeft(GenTree* tree); #endif bool fgIsCommaThrow(GenTree* tree, bool forFolding = false); bool fgIsThrow(GenTree* tree); bool fgInDifferentRegions(BasicBlock* blk1, BasicBlock* blk2); bool fgIsBlockCold(BasicBlock* block); GenTree* fgMorphCastIntoHelper(GenTree* tree, int helper, GenTree* oper); GenTree* fgMorphIntoHelperCall(GenTree* tree, int helper, GenTreeCall::Use* args, bool morphArgs = true); GenTree* fgMorphStackArgForVarArgs(unsigned lclNum, var_types varType, unsigned lclOffs); // A "MorphAddrContext" carries information from the surrounding context. If we are evaluating a byref address, // it is useful to know whether the address will be immediately dereferenced, or whether the address value will // be used, perhaps by passing it as an argument to a called method. This affects how null checking is done: // for sufficiently small offsets, we can rely on OS page protection to implicitly null-check addresses that we // know will be dereferenced. To know that reliance on implicit null checking is sound, we must further know that // all offsets between the top-level indirection and the bottom are constant, and that their sum is sufficiently // small; hence the other fields of MorphAddrContext. enum MorphAddrContextKind { MACK_Ind, MACK_Addr, }; struct MorphAddrContext { MorphAddrContextKind m_kind; bool m_allConstantOffsets; // Valid only for "m_kind == MACK_Ind". True iff all offsets between // top-level indirection and here have been constants. size_t m_totalOffset; // Valid only for "m_kind == MACK_Ind", and if "m_allConstantOffsets" is true. // In that case, is the sum of those constant offsets. MorphAddrContext(MorphAddrContextKind kind) : m_kind(kind), m_allConstantOffsets(true), m_totalOffset(0) { } }; // A MACK_CopyBlock context is immutable, so we can just make one of these and share it. static MorphAddrContext s_CopyBlockMAC; #ifdef FEATURE_SIMD GenTree* getSIMDStructFromField(GenTree* tree, CorInfoType* simdBaseJitTypeOut, unsigned* indexOut, unsigned* simdSizeOut, bool ignoreUsedInSIMDIntrinsic = false); GenTree* fgMorphFieldAssignToSimdSetElement(GenTree* tree); GenTree* fgMorphFieldToSimdGetElement(GenTree* tree); bool fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement* stmt); void impMarkContiguousSIMDFieldAssignments(Statement* stmt); // fgPreviousCandidateSIMDFieldAsgStmt is only used for tracking previous simd field assignment // in function: Complier::impMarkContiguousSIMDFieldAssignments. 
Statement* fgPreviousCandidateSIMDFieldAsgStmt; #endif // FEATURE_SIMD GenTree* fgMorphArrayIndex(GenTree* tree); GenTree* fgMorphExpandCast(GenTreeCast* tree); GenTreeFieldList* fgMorphLclArgToFieldlist(GenTreeLclVarCommon* lcl); void fgInitArgInfo(GenTreeCall* call); GenTreeCall* fgMorphArgs(GenTreeCall* call); void fgMakeOutgoingStructArgCopy(GenTreeCall* call, GenTreeCall::Use* args, CORINFO_CLASS_HANDLE copyBlkClass); GenTree* fgMorphLocalVar(GenTree* tree, bool forceRemorph); public: bool fgAddrCouldBeNull(GenTree* addr); private: GenTree* fgMorphField(GenTree* tree, MorphAddrContext* mac); bool fgCanFastTailCall(GenTreeCall* call, const char** failReason); #if FEATURE_FASTTAILCALL bool fgCallHasMustCopyByrefParameter(GenTreeCall* callee); #endif bool fgCheckStmtAfterTailCall(); GenTree* fgMorphTailCallViaHelpers(GenTreeCall* call, CORINFO_TAILCALL_HELPERS& help); bool fgCanTailCallViaJitHelper(); void fgMorphTailCallViaJitHelper(GenTreeCall* call); GenTree* fgCreateCallDispatcherAndGetResult(GenTreeCall* origCall, CORINFO_METHOD_HANDLE callTargetStubHnd, CORINFO_METHOD_HANDLE dispatcherHnd); GenTree* getLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle); GenTree* getRuntimeLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle); GenTree* getVirtMethodPointerTree(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); GenTree* getTokenHandleTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool parent); GenTree* fgMorphPotentialTailCall(GenTreeCall* call); GenTree* fgGetStubAddrArg(GenTreeCall* call); unsigned fgGetArgTabEntryParameterLclNum(GenTreeCall* call, fgArgTabEntry* argTabEntry); void fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCall* recursiveTailCall); Statement* fgAssignRecursiveCallArgToCallerParam(GenTree* arg, fgArgTabEntry* argTabEntry, unsigned lclParamNum, BasicBlock* block, const DebugInfo& callDI, Statement* tmpAssignmentInsertionPoint, Statement* paramAssignmentInsertionPoint); GenTree* fgMorphCall(GenTreeCall* call); GenTree* fgExpandVirtualVtableCallTarget(GenTreeCall* call); void fgMorphCallInline(GenTreeCall* call, InlineResult* result); void fgMorphCallInlineHelper(GenTreeCall* call, InlineResult* result, InlineContext** createdContext); #if DEBUG void fgNoteNonInlineCandidate(Statement* stmt, GenTreeCall* call); static fgWalkPreFn fgFindNonInlineCandidate; #endif GenTree* fgOptimizeDelegateConstructor(GenTreeCall* call, CORINFO_CONTEXT_HANDLE* ExactContextHnd, CORINFO_RESOLVED_TOKEN* ldftnToken); GenTree* fgMorphLeaf(GenTree* tree); void fgAssignSetVarDef(GenTree* tree); GenTree* fgMorphOneAsgBlockOp(GenTree* tree); GenTree* fgMorphInitBlock(GenTree* tree); GenTree* fgMorphPromoteLocalInitBlock(GenTreeLclVar* destLclNode, GenTree* initVal, unsigned blockSize); GenTree* fgMorphGetStructAddr(GenTree** pTree, CORINFO_CLASS_HANDLE clsHnd, bool isRValue = false); GenTree* fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigned blockWidth, bool isBlkReqd); GenTree* fgMorphCopyBlock(GenTree* tree); GenTree* fgMorphStoreDynBlock(GenTreeStoreDynBlk* tree); GenTree* fgMorphForRegisterFP(GenTree* tree); GenTree* fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac = nullptr); GenTree* fgOptimizeCast(GenTreeCast* cast); GenTree* fgOptimizeEqualityComparisonWithConst(GenTreeOp* cmp); GenTree* fgOptimizeRelationalComparisonWithConst(GenTreeOp* cmp); #ifdef FEATURE_HW_INTRINSICS GenTree* 
fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node); #endif GenTree* fgOptimizeCommutativeArithmetic(GenTreeOp* tree); GenTree* fgOptimizeRelationalComparisonWithCasts(GenTreeOp* cmp); GenTree* fgOptimizeAddition(GenTreeOp* add); GenTree* fgOptimizeMultiply(GenTreeOp* mul); GenTree* fgOptimizeBitwiseAnd(GenTreeOp* andOp); GenTree* fgPropagateCommaThrow(GenTree* parent, GenTreeOp* commaThrow, GenTreeFlags precedingSideEffects); GenTree* fgMorphRetInd(GenTreeUnOp* tree); GenTree* fgMorphModToSubMulDiv(GenTreeOp* tree); GenTree* fgMorphUModToAndSub(GenTreeOp* tree); GenTree* fgMorphSmpOpOptional(GenTreeOp* tree); GenTree* fgMorphMultiOp(GenTreeMultiOp* multiOp); GenTree* fgMorphConst(GenTree* tree); bool fgMorphCanUseLclFldForCopy(unsigned lclNum1, unsigned lclNum2); GenTreeLclVar* fgMorphTryFoldObjAsLclVar(GenTreeObj* obj, bool destroyNodes = true); GenTreeOp* fgMorphCommutative(GenTreeOp* tree); GenTree* fgMorphCastedBitwiseOp(GenTreeOp* tree); GenTree* fgMorphReduceAddOps(GenTree* tree); public: GenTree* fgMorphTree(GenTree* tree, MorphAddrContext* mac = nullptr); private: void fgKillDependentAssertionsSingle(unsigned lclNum DEBUGARG(GenTree* tree)); void fgKillDependentAssertions(unsigned lclNum DEBUGARG(GenTree* tree)); void fgMorphTreeDone(GenTree* tree, GenTree* oldTree = nullptr DEBUGARG(int morphNum = 0)); Statement* fgMorphStmt; unsigned fgGetBigOffsetMorphingTemp(var_types type); // We cache one temp per type to be // used when morphing big offset. //----------------------- Liveness analysis ------------------------------- VARSET_TP fgCurUseSet; // vars used by block (before an assignment) VARSET_TP fgCurDefSet; // vars assigned by block (before a use) MemoryKindSet fgCurMemoryUse; // True iff the current basic block uses memory. MemoryKindSet fgCurMemoryDef; // True iff the current basic block modifies memory. MemoryKindSet fgCurMemoryHavoc; // True if the current basic block is known to set memory to a "havoc" value. bool byrefStatesMatchGcHeapStates; // True iff GcHeap and ByrefExposed memory have all the same def points. void fgMarkUseDef(GenTreeLclVarCommon* tree); void fgBeginScopeLife(VARSET_TP* inScope, VarScopeDsc* var); void fgEndScopeLife(VARSET_TP* inScope, VarScopeDsc* var); void fgMarkInScope(BasicBlock* block, VARSET_VALARG_TP inScope); void fgUnmarkInScope(BasicBlock* block, VARSET_VALARG_TP unmarkScope); void fgExtendDbgScopes(); void fgExtendDbgLifetimes(); #ifdef DEBUG void fgDispDebugScopes(); #endif // DEBUG //------------------------------------------------------------------------- // // The following keeps track of any code we've added for things like array // range checking or explicit calls to enable GC, and so on. // public: struct AddCodeDsc { AddCodeDsc* acdNext; BasicBlock* acdDstBlk; // block to which we jump unsigned acdData; SpecialCodeKind acdKind; // what kind of a special block is this? #if !FEATURE_FIXED_OUT_ARGS bool acdStkLvlInit; // has acdStkLvl value been already set? unsigned acdStkLvl; // stack level in stack slots. 
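// (Illustrative note: acdStkLvl is only meaningful on targets without a fixed outgoing-arg area, where the
// shared throw helper block presumably has to be reached at a known stack depth; see fgThrowHlpBlkStkLevel
// further below.)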
#endif // !FEATURE_FIXED_OUT_ARGS }; private: static unsigned acdHelper(SpecialCodeKind codeKind); AddCodeDsc* fgAddCodeList; bool fgAddCodeModf; bool fgRngChkThrowAdded; AddCodeDsc* fgExcptnTargetCache[SCK_COUNT]; BasicBlock* fgRngChkTarget(BasicBlock* block, SpecialCodeKind kind); BasicBlock* fgAddCodeRef(BasicBlock* srcBlk, unsigned refData, SpecialCodeKind kind); public: AddCodeDsc* fgFindExcptnTarget(SpecialCodeKind kind, unsigned refData); bool fgUseThrowHelperBlocks(); AddCodeDsc* fgGetAdditionalCodeDescriptors() { return fgAddCodeList; } private: bool fgIsCodeAdded(); bool fgIsThrowHlpBlk(BasicBlock* block); #if !FEATURE_FIXED_OUT_ARGS unsigned fgThrowHlpBlkStkLevel(BasicBlock* block); #endif // !FEATURE_FIXED_OUT_ARGS unsigned fgBigOffsetMorphingTemps[TYP_COUNT]; unsigned fgCheckInlineDepthAndRecursion(InlineInfo* inlineInfo); void fgInvokeInlineeCompiler(GenTreeCall* call, InlineResult* result, InlineContext** createdContext); void fgInsertInlineeBlocks(InlineInfo* pInlineInfo); Statement* fgInlinePrependStatements(InlineInfo* inlineInfo); void fgInlineAppendStatements(InlineInfo* inlineInfo, BasicBlock* block, Statement* stmt); #if FEATURE_MULTIREG_RET GenTree* fgGetStructAsStructPtr(GenTree* tree); GenTree* fgAssignStructInlineeToVar(GenTree* child, CORINFO_CLASS_HANDLE retClsHnd); void fgAttachStructInlineeToAsg(GenTree* tree, GenTree* child, CORINFO_CLASS_HANDLE retClsHnd); #endif // FEATURE_MULTIREG_RET static fgWalkPreFn fgUpdateInlineReturnExpressionPlaceHolder; static fgWalkPostFn fgLateDevirtualization; #ifdef DEBUG static fgWalkPreFn fgDebugCheckInlineCandidates; void CheckNoTransformableIndirectCallsRemain(); static fgWalkPreFn fgDebugCheckForTransformableIndirectCalls; #endif void fgPromoteStructs(); void fgMorphStructField(GenTree* tree, GenTree* parent); void fgMorphLocalField(GenTree* tree, GenTree* parent); // Reset the refCount for implicit byrefs. void fgResetImplicitByRefRefCount(); // Change implicit byrefs' types from struct to pointer, and for any that were // promoted, create new promoted struct temps. void fgRetypeImplicitByRefArgs(); // Rewrite appearances of implicit byrefs (manifest the implied additional level of indirection). bool fgMorphImplicitByRefArgs(GenTree* tree); GenTree* fgMorphImplicitByRefArgs(GenTree* tree, bool isAddr); // Clear up annotations for any struct promotion temps created for implicit byrefs. void fgMarkDemotedImplicitByRefArgs(); void fgMarkAddressExposedLocals(); void fgMarkAddressExposedLocals(Statement* stmt); PhaseStatus fgForwardSub(); bool fgForwardSubBlock(BasicBlock* block); bool fgForwardSubStatement(Statement* statement); static fgWalkPreFn fgUpdateSideEffectsPre; static fgWalkPostFn fgUpdateSideEffectsPost; // The given local variable, required to be a struct variable, is being assigned via // a "lclField", to make it masquerade as an integral type in the ABI. Make sure that // the variable is not enregistered, and is therefore not promoted independently. 
void fgLclFldAssign(unsigned lclNum); static fgWalkPreFn gtHasLocalsWithAddrOpCB; enum TypeProducerKind { TPK_Unknown = 0, // May not be a RuntimeType TPK_Handle = 1, // RuntimeType via handle TPK_GetType = 2, // RuntimeType via Object.get_Type() TPK_Null = 3, // Tree value is null TPK_Other = 4 // RuntimeType via other means }; TypeProducerKind gtGetTypeProducerKind(GenTree* tree); bool gtIsTypeHandleToRuntimeTypeHelper(GenTreeCall* call); bool gtIsTypeHandleToRuntimeTypeHandleHelper(GenTreeCall* call, CorInfoHelpFunc* pHelper = nullptr); bool gtIsActiveCSE_Candidate(GenTree* tree); bool fgIsBigOffset(size_t offset); bool fgNeedReturnSpillTemp(); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Optimizer XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: void optInit(); GenTree* optRemoveRangeCheck(GenTreeBoundsChk* check, GenTree* comma, Statement* stmt); GenTree* optRemoveStandaloneRangeCheck(GenTreeBoundsChk* check, Statement* stmt); void optRemoveCommaBasedRangeCheck(GenTree* comma, Statement* stmt); protected: // Do hoisting for all loops. void optHoistLoopCode(); // To represent sets of VN's that have already been hoisted in outer loops. typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, bool> VNSet; struct LoopHoistContext { private: // The set of variables hoisted in the current loop (or nullptr if there are none). VNSet* m_pHoistedInCurLoop; public: // Value numbers of expressions that have been hoisted in parent loops in the loop nest. VNSet m_hoistedInParentLoops; // Value numbers of expressions that have been hoisted in the current (or most recent) loop in the nest. // Previous decisions on loop-invariance of value numbers in the current loop. VNSet m_curLoopVnInvariantCache; VNSet* GetHoistedInCurLoop(Compiler* comp) { if (m_pHoistedInCurLoop == nullptr) { m_pHoistedInCurLoop = new (comp->getAllocatorLoopHoist()) VNSet(comp->getAllocatorLoopHoist()); } return m_pHoistedInCurLoop; } VNSet* ExtractHoistedInCurLoop() { VNSet* res = m_pHoistedInCurLoop; m_pHoistedInCurLoop = nullptr; return res; } LoopHoistContext(Compiler* comp) : m_pHoistedInCurLoop(nullptr) , m_hoistedInParentLoops(comp->getAllocatorLoopHoist()) , m_curLoopVnInvariantCache(comp->getAllocatorLoopHoist()) { } }; // Do hoisting for loop "lnum" (an index into the optLoopTable), and all loops nested within it. // Tracks the expressions that have been hoisted by containing loops by temporarily recording their // value numbers in "m_hoistedInParentLoops". This set is not modified by the call. void optHoistLoopNest(unsigned lnum, LoopHoistContext* hoistCtxt); // Do hoisting for a particular loop ("lnum" is an index into the optLoopTable.) // Assumes that expressions have been hoisted in containing loops if their value numbers are in // "m_hoistedInParentLoops". // void optHoistThisLoop(unsigned lnum, LoopHoistContext* hoistCtxt); // Hoist all expressions in "blocks" that are invariant in loop "loopNum" (an index into the optLoopTable) // outside of that loop. Exempt expressions whose value number is in "m_hoistedInParentLoops"; add VN's of hoisted // expressions to "hoistInLoop". 
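// (Illustrative note, based on the comments above: optHoistLoopNest presumably moves the VNs returned by
// ExtractHoistedInCurLoop() into m_hoistedInParentLoops before visiting the loops nested in "lnum", so an
// inner loop does not re-hoist an expression that an enclosing loop has already hoisted.)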
void optHoistLoopBlocks(unsigned loopNum, ArrayStack<BasicBlock*>* blocks, LoopHoistContext* hoistContext); // Return true if the tree looks profitable to hoist out of loop 'lnum'. bool optIsProfitableToHoistTree(GenTree* tree, unsigned lnum); // Performs the hoisting 'tree' into the PreHeader for loop 'lnum' void optHoistCandidate(GenTree* tree, BasicBlock* treeBb, unsigned lnum, LoopHoistContext* hoistCtxt); // Returns true iff the ValueNum "vn" represents a value that is loop-invariant in "lnum". // Constants and init values are always loop invariant. // VNPhi's connect VN's to the SSA definition, so we can know if the SSA def occurs in the loop. bool optVNIsLoopInvariant(ValueNum vn, unsigned lnum, VNSet* recordedVNs); // If "blk" is the entry block of a natural loop, returns true and sets "*pLnum" to the index of the loop // in the loop table. bool optBlockIsLoopEntry(BasicBlock* blk, unsigned* pLnum); // Records the set of "side effects" of all loops: fields (object instance and static) // written to, and SZ-array element type equivalence classes updated. void optComputeLoopSideEffects(); #ifdef DEBUG bool optAnyChildNotRemoved(unsigned loopNum); #endif // DEBUG // Mark a loop as removed. void optMarkLoopRemoved(unsigned loopNum); private: // Requires "lnum" to be the index of an outermost loop in the loop table. Traverses the body of that loop, // including all nested loops, and records the set of "side effects" of the loop: fields (object instance and // static) written to, and SZ-array element type equivalence classes updated. void optComputeLoopNestSideEffects(unsigned lnum); // Given a loop number 'lnum' mark it and any nested loops as having 'memoryHavoc' void optRecordLoopNestsMemoryHavoc(unsigned lnum, MemoryKindSet memoryHavoc); // Add the side effects of "blk" (which is required to be within a loop) to all loops of which it is a part. // Returns false if we encounter a block that is not marked as being inside a loop. // bool optComputeLoopSideEffectsOfBlock(BasicBlock* blk); // Hoist the expression "expr" out of loop "lnum". void optPerformHoistExpr(GenTree* expr, BasicBlock* exprBb, unsigned lnum); public: void optOptimizeBools(); public: PhaseStatus optInvertLoops(); // Invert loops so they're entered at top and tested at bottom. PhaseStatus optOptimizeLayout(); // Optimize the BasicBlock layout of the method PhaseStatus optSetBlockWeights(); PhaseStatus optFindLoopsPhase(); // Finds loops and records them in the loop table void optFindLoops(); PhaseStatus optCloneLoops(); void optCloneLoop(unsigned loopInd, LoopCloneContext* context); void optEnsureUniqueHead(unsigned loopInd, weight_t ambientWeight); PhaseStatus optUnrollLoops(); // Unrolls loops (needs to have cost info) void optRemoveRedundantZeroInits(); protected: // This enumeration describes what is killed by a call. enum callInterf { CALLINT_NONE, // no interference (most helpers) CALLINT_REF_INDIRS, // kills GC ref indirections (SETFIELD OBJ) CALLINT_SCL_INDIRS, // kills non GC ref indirections (SETFIELD non-OBJ) CALLINT_ALL_INDIRS, // kills both GC ref and non GC ref indirections (SETFIELD STRUCT) CALLINT_ALL, // kills everything (normal method call) }; enum class FieldKindForVN { SimpleStatic, WithBaseAddr }; public: // A "LoopDsc" describes a ("natural") loop. We (currently) require the body of a loop to be a contiguous (in // bbNext order) sequence of basic blocks. (At times, we may require the blocks in a loop to be "properly numbered" // in bbNext order; we use comparisons on the bbNum to decide order.) 
// The blocks that define the body are // top <= entry <= bottom // The "head" of the loop is a block outside the loop that has "entry" as a successor. We only support loops with a // single 'head' block. The meanings of these blocks are given in the definitions below. Also see the picture at // Compiler::optFindNaturalLoops(). struct LoopDsc { BasicBlock* lpHead; // HEAD of the loop (not part of the looping of the loop) -- has ENTRY as a successor. BasicBlock* lpTop; // loop TOP (the back edge from lpBottom reaches here). Lexically first block (in bbNext // order) reachable in this loop. BasicBlock* lpEntry; // the ENTRY in the loop (in most cases TOP or BOTTOM) BasicBlock* lpBottom; // loop BOTTOM (from here we have a back edge to the TOP) BasicBlock* lpExit; // if a single exit loop this is the EXIT (in most cases BOTTOM) callInterf lpAsgCall; // "callInterf" for calls in the loop ALLVARSET_TP lpAsgVars; // set of vars assigned within the loop (all vars, not just tracked) varRefKinds lpAsgInds : 8; // set of inds modified within the loop LoopFlags lpFlags; unsigned char lpExitCnt; // number of exits from the loop unsigned char lpParent; // The index of the most-nested loop that completely contains this one, // or else BasicBlock::NOT_IN_LOOP if no such loop exists. unsigned char lpChild; // The index of a nested loop, or else BasicBlock::NOT_IN_LOOP if no child exists. // (Actually, an "immediately" nested loop -- // no other child of this loop is a parent of lpChild.) unsigned char lpSibling; // The index of another loop that is an immediate child of lpParent, // or else BasicBlock::NOT_IN_LOOP. One can enumerate all the children of a loop // by following "lpChild" then "lpSibling" links. bool lpLoopHasMemoryHavoc[MemoryKindCount]; // The loop contains an operation that we assume has arbitrary // memory side effects. If this is set, the fields below // may not be accurate (since they become irrelevant.) VARSET_TP lpVarInOut; // The set of variables that are IN or OUT during the execution of this loop VARSET_TP lpVarUseDef; // The set of variables that are USE or DEF during the execution of this loop // The following counts are used for hoisting profitability checks. int lpHoistedExprCount; // The register count for the non-FP expressions from inside this loop that have been // hoisted int lpLoopVarCount; // The register count for the non-FP LclVars that are read/written inside this loop int lpVarInOutCount; // The register count for the non-FP LclVars that are alive inside or across this loop int lpHoistedFPExprCount; // The register count for the FP expressions from inside this loop that have been // hoisted int lpLoopVarFPCount; // The register count for the FP LclVars that are read/written inside this loop int lpVarInOutFPCount; // The register count for the FP LclVars that are alive inside or across this loop typedef JitHashTable<CORINFO_FIELD_HANDLE, JitPtrKeyFuncs<struct CORINFO_FIELD_STRUCT_>, FieldKindForVN> FieldHandleSet; FieldHandleSet* lpFieldsModified; // This has entries for all static field and object instance fields modified // in the loop. typedef JitHashTable<CORINFO_CLASS_HANDLE, JitPtrKeyFuncs<struct CORINFO_CLASS_STRUCT_>, bool> ClassHandleSet; ClassHandleSet* lpArrayElemTypesModified; // Bits set indicate the set of sz array element types such that // arrays of that type are modified // in the loop. 
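// (Illustrative note: these summaries are filled in by optComputeLoopSideEffects and consulted by loop
// hoisting and value numbering; e.g. a field load inside the loop can only be treated as invariant if its
// handle is not in lpFieldsModified and the corresponding lpLoopHasMemoryHavoc entry is false.)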
// Adds the variable liveness information for 'blk' to 'this' LoopDsc void AddVariableLiveness(Compiler* comp, BasicBlock* blk); inline void AddModifiedField(Compiler* comp, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind); // This doesn't *always* take a class handle -- it can also take primitive types, encoded as class handles // (shifted left, with a low-order bit set to distinguish.) // Use the {Encode/Decode}ElemType methods to construct/destruct these. inline void AddModifiedElemType(Compiler* comp, CORINFO_CLASS_HANDLE structHnd); /* The following values are set only for iterator loops, i.e. has the flag LPFLG_ITER set */ GenTree* lpIterTree; // The "i = i <op> const" tree unsigned lpIterVar() const; // iterator variable # int lpIterConst() const; // the constant with which the iterator is incremented genTreeOps lpIterOper() const; // the type of the operation on the iterator (ASG_ADD, ASG_SUB, etc.) void VERIFY_lpIterTree() const; var_types lpIterOperType() const; // For overflow instructions // Set to the block where we found the initialization for LPFLG_CONST_INIT loops. // Initially, this will be 'head', but 'head' might change if we insert a loop pre-header block. BasicBlock* lpInitBlock; int lpConstInit; // initial constant value of iterator : Valid if LPFLG_CONST_INIT // The following is for LPFLG_ITER loops only (i.e. the loop condition is "i RELOP const or var") GenTree* lpTestTree; // pointer to the node containing the loop test genTreeOps lpTestOper() const; // the type of the comparison between the iterator and the limit (GT_LE, GT_GE, // etc.) void VERIFY_lpTestTree() const; bool lpIsReversed() const; // true if the iterator node is the second operand in the loop condition GenTree* lpIterator() const; // the iterator node in the loop test GenTree* lpLimit() const; // the limit node in the loop test // Limit constant value of iterator - loop condition is "i RELOP const" // : Valid if LPFLG_CONST_LIMIT int lpConstLimit() const; // The lclVar # in the loop condition ( "i RELOP lclVar" ) // : Valid if LPFLG_VAR_LIMIT unsigned lpVarLimit() const; // The array length in the loop condition ( "i RELOP arr.len" or "i RELOP arr[i][j].len" ) // : Valid if LPFLG_ARRLEN_LIMIT bool lpArrLenLimit(Compiler* comp, ArrIndex* index) const; // Returns "true" iff this is a "top entry" loop. bool lpIsTopEntry() const { if (lpHead->bbNext == lpEntry) { assert(lpHead->bbFallsThrough()); assert(lpTop == lpEntry); return true; } else { return false; } } // Returns "true" iff "*this" contains the blk. bool lpContains(BasicBlock* blk) const { return lpTop->bbNum <= blk->bbNum && blk->bbNum <= lpBottom->bbNum; } // Returns "true" iff "*this" (properly) contains the range [top, bottom] (allowing tops // to be equal, but requiring bottoms to be different.) bool lpContains(BasicBlock* top, BasicBlock* bottom) const { return lpTop->bbNum <= top->bbNum && bottom->bbNum < lpBottom->bbNum; } // Returns "true" iff "*this" (properly) contains "lp2" (allowing tops to be equal, but requiring // bottoms to be different.) bool lpContains(const LoopDsc& lp2) const { return lpContains(lp2.lpTop, lp2.lpBottom); } // Returns "true" iff "*this" is (properly) contained by the range [top, bottom] // (allowing tops to be equal, but requiring bottoms to be different.) 
bool lpContainedBy(BasicBlock* top, BasicBlock* bottom) const { return top->bbNum <= lpTop->bbNum && lpBottom->bbNum < bottom->bbNum; } // Returns "true" iff "*this" is (properly) contained by "lp2" // (allowing tops to be equal, but requiring bottoms to be different.) bool lpContainedBy(const LoopDsc& lp2) const { return lpContainedBy(lp2.lpTop, lp2.lpBottom); } // Returns "true" iff "*this" is disjoint from the range [top, bottom]. bool lpDisjoint(BasicBlock* top, BasicBlock* bottom) const { return bottom->bbNum < lpTop->bbNum || lpBottom->bbNum < top->bbNum; } // Returns "true" iff "*this" is disjoint from "lp2". bool lpDisjoint(const LoopDsc& lp2) const { return lpDisjoint(lp2.lpTop, lp2.lpBottom); } // Returns "true" iff the loop is well-formed (see code for defn). bool lpWellFormed() const { return lpTop->bbNum <= lpEntry->bbNum && lpEntry->bbNum <= lpBottom->bbNum && (lpHead->bbNum < lpTop->bbNum || lpHead->bbNum > lpBottom->bbNum); } #ifdef DEBUG void lpValidatePreHeader() const { // If this is called, we expect there to be a pre-header. assert(lpFlags & LPFLG_HAS_PREHEAD); // The pre-header must unconditionally enter the loop. assert(lpHead->GetUniqueSucc() == lpEntry); // The loop block must be marked as a pre-header. assert(lpHead->bbFlags & BBF_LOOP_PREHEADER); // The loop entry must have a single non-loop predecessor, which is the pre-header. // We can't assume here that the bbNum are properly ordered, so we can't do a simple lpContained() // check. So, we defer this check, which will be done by `fgDebugCheckLoopTable()`. } #endif // DEBUG // LoopBlocks: convenience method for enabling range-based `for` iteration over all the // blocks in a loop, e.g.: // for (BasicBlock* const block : loop->LoopBlocks()) ... // Currently, the loop blocks are expected to be in linear, lexical, `bbNext` order // from `lpTop` through `lpBottom`, inclusive. All blocks in this range are considered // to be part of the loop. // BasicBlockRangeList LoopBlocks() const { return BasicBlockRangeList(lpTop, lpBottom); } }; protected: bool fgMightHaveLoop(); // returns true if there are any back edges bool fgHasLoops; // True if this method has any loops, set in fgComputeReachability public: LoopDsc* optLoopTable; // loop descriptor table unsigned char optLoopCount; // number of tracked loops unsigned char loopAlignCandidates; // number of loops identified for alignment // Every time we rebuild the loop table, we increase the global "loop epoch". Any loop indices or // loop table pointers from the previous epoch are invalid. // TODO: validate this in some way? unsigned optCurLoopEpoch; void NewLoopEpoch() { ++optCurLoopEpoch; JITDUMP("New loop epoch %d\n", optCurLoopEpoch); } #ifdef DEBUG unsigned char loopsAligned; // number of loops actually aligned #endif // DEBUG bool optRecordLoop(BasicBlock* head, BasicBlock* top, BasicBlock* entry, BasicBlock* bottom, BasicBlock* exit, unsigned char exitCnt); void optClearLoopIterInfo(); #ifdef DEBUG void optPrintLoopInfo(unsigned lnum, bool printVerbose = false); void optPrintLoopInfo(const LoopDsc* loop, bool printVerbose = false); void optPrintLoopTable(); #endif protected: unsigned optCallCount; // number of calls made in the method unsigned optIndirectCallCount; // number of virtual, interface and indirect calls made in the method unsigned optNativeCallCount; // number of Pinvoke/Native calls made in the method unsigned optLoopsCloned; // number of loops cloned in the current method. 
#ifdef DEBUG void optCheckPreds(); #endif void optResetLoopInfo(); void optFindAndScaleGeneralLoopBlocks(); // Determine if there are any potential loops, and set BBF_LOOP_HEAD on potential loop heads. void optMarkLoopHeads(); void optScaleLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk); void optUnmarkLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk); void optUpdateLoopsBeforeRemoveBlock(BasicBlock* block, bool skipUnmarkLoop = false); bool optIsLoopTestEvalIntoTemp(Statement* testStmt, Statement** newTestStmt); unsigned optIsLoopIncrTree(GenTree* incr); bool optCheckIterInLoopTest(unsigned loopInd, GenTree* test, BasicBlock* from, BasicBlock* to, unsigned iterVar); bool optComputeIterInfo(GenTree* incr, BasicBlock* from, BasicBlock* to, unsigned* pIterVar); bool optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenTree* init, unsigned iterVar); bool optExtractInitTestIncr( BasicBlock* head, BasicBlock* bottom, BasicBlock* exit, GenTree** ppInit, GenTree** ppTest, GenTree** ppIncr); void optFindNaturalLoops(); void optIdentifyLoopsForAlignment(); // Ensures that all the loops in the loop nest rooted at "loopInd" (an index into the loop table) are 'canonical' -- // each loop has a unique "top." Returns "true" iff the flowgraph has been modified. bool optCanonicalizeLoopNest(unsigned char loopInd); // Ensures that the loop "loopInd" (an index into the loop table) is 'canonical' -- it has a unique "top," // unshared with any other loop. Returns "true" iff the flowgraph has been modified bool optCanonicalizeLoop(unsigned char loopInd); // Requires "l1" to be a valid loop table index, and not "BasicBlock::NOT_IN_LOOP". // Requires "l2" to be a valid loop table index, or else "BasicBlock::NOT_IN_LOOP". // Returns true iff "l2" is not NOT_IN_LOOP, and "l1" contains "l2". // A loop contains itself. bool optLoopContains(unsigned l1, unsigned l2) const; // Updates the loop table by changing loop "loopInd", whose head is required // to be "from", to be "to". Also performs this transformation for any // loop nested in "loopInd" that shares the same head as "loopInd". void optUpdateLoopHead(unsigned loopInd, BasicBlock* from, BasicBlock* to); void optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, const bool updatePreds = false); // Marks the containsCall information to "lnum" and any parent loops. void AddContainsCallAllContainingLoops(unsigned lnum); // Adds the variable liveness information from 'blk' to "lnum" and any parent loops. void AddVariableLivenessAllContainingLoops(unsigned lnum, BasicBlock* blk); // Adds "fldHnd" to the set of modified fields of "lnum" and any parent loops. void AddModifiedFieldAllContainingLoops(unsigned lnum, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind); // Adds "elemType" to the set of modified array element types of "lnum" and any parent loops. void AddModifiedElemTypeAllContainingLoops(unsigned lnum, CORINFO_CLASS_HANDLE elemType); // Requires that "from" and "to" have the same "bbJumpKind" (perhaps because "to" is a clone // of "from".) Copies the jump destination from "from" to "to". void optCopyBlkDest(BasicBlock* from, BasicBlock* to); // Returns true if 'block' is an entry block for any loop in 'optLoopTable' bool optIsLoopEntry(BasicBlock* block) const; // The depth of the loop described by "lnum" (an index into the loop table.) 
(0 == top level) unsigned optLoopDepth(unsigned lnum) { assert(lnum < optLoopCount); unsigned depth = 0; while ((lnum = optLoopTable[lnum].lpParent) != BasicBlock::NOT_IN_LOOP) { ++depth; } return depth; } // Struct used in optInvertWhileLoop to count interesting constructs to boost the profitability score. struct OptInvertCountTreeInfoType { int sharedStaticHelperCount; int arrayLengthCount; }; static fgWalkResult optInvertCountTreeInfo(GenTree** pTree, fgWalkData* data); bool optInvertWhileLoop(BasicBlock* block); private: static bool optIterSmallOverflow(int iterAtExit, var_types incrType); static bool optIterSmallUnderflow(int iterAtExit, var_types decrType); bool optComputeLoopRep(int constInit, int constLimit, int iterInc, genTreeOps iterOper, var_types iterType, genTreeOps testOper, bool unsignedTest, bool dupCond, unsigned* iterCount); static fgWalkPreFn optIsVarAssgCB; protected: bool optIsVarAssigned(BasicBlock* beg, BasicBlock* end, GenTree* skip, unsigned var); bool optIsVarAssgLoop(unsigned lnum, unsigned var); int optIsSetAssgLoop(unsigned lnum, ALLVARSET_VALARG_TP vars, varRefKinds inds = VR_NONE); bool optNarrowTree(GenTree* tree, var_types srct, var_types dstt, ValueNumPair vnpNarrow, bool doit); protected: // The following is the upper limit on how many expressions we'll keep track // of for the CSE analysis. // static const unsigned MAX_CSE_CNT = EXPSET_SZ; static const int MIN_CSE_COST = 2; // BitVec trait information only used by the optCSE_canSwap() method, for the CSE_defMask and CSE_useMask. // This BitVec uses one bit per CSE candidate BitVecTraits* cseMaskTraits; // one bit per CSE candidate // BitVec trait information for computing CSE availability using the CSE_DataFlow algorithm. // Two bits are allocated per CSE candidate to compute CSE availability // plus an extra bit to handle the initial unvisited case. // (See CSE_DataFlow::EndMerge for an explanation of why this is necessary.) // // The two bits per CSE candidate have the following meanings: // 11 - The CSE is available, and is also available when considering calls as killing availability. // 10 - The CSE is available, but is not available when considering calls as killing availability. // 00 - The CSE is not available // 01 - An illegal combination // BitVecTraits* cseLivenessTraits; //----------------------------------------------------------------------------------------------------------------- // getCSEnum2bit: Return the normalized index to use in the EXPSET_TP for the CSE with the given CSE index. // Each GenTree has a `gtCSEnum` field. Zero is reserved to mean this node is not a CSE, positive values indicate // CSE uses, and negative values indicate CSE defs. The caller must pass a non-zero positive value, as from // GET_CSE_INDEX(). // static unsigned genCSEnum2bit(unsigned CSEnum) { assert((CSEnum > 0) && (CSEnum <= MAX_CSE_CNT)); return CSEnum - 1; } //----------------------------------------------------------------------------------------------------------------- // getCSEAvailBit: Return the bit used by CSE dataflow sets (bbCseGen, etc.) for the availability bit for a CSE. // static unsigned getCSEAvailBit(unsigned CSEnum) { return genCSEnum2bit(CSEnum) * 2; } //----------------------------------------------------------------------------------------------------------------- // getCSEAvailCrossCallBit: Return the bit used by CSE dataflow sets (bbCseGen, etc.) for the availability bit // for a CSE considering calls as killing availability bit (see description above). 
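// (Illustrative example: the CSE with index 1 maps to genCSEnum2bit(1) == 0, so getCSEAvailBit(1) == 0 and
//  getCSEAvailCrossCallBit(1) == 1; CSE #2 uses bits 2 and 3, and so on -- two adjacent bits per candidate.)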
    //
    static unsigned getCSEAvailCrossCallBit(unsigned CSEnum)
    {
        return getCSEAvailBit(CSEnum) + 1;
    }

    void optPrintCSEDataFlowSet(EXPSET_VALARG_TP cseDataFlowSet, bool includeBits = true);

    EXPSET_TP cseCallKillsMask; // Computed once - A mask that is used to kill available CSEs at callsites

    /* Generic list of nodes - used by the CSE logic */

    struct treeLst
    {
        treeLst* tlNext;
        GenTree* tlTree;
    };

    struct treeStmtLst
    {
        treeStmtLst* tslNext;
        GenTree*     tslTree;  // tree node
        Statement*   tslStmt;  // statement containing the tree
        BasicBlock*  tslBlock; // block containing the statement
    };

    // The following logic keeps track of expressions via a simple hash table.

    struct CSEdsc
    {
        CSEdsc*  csdNextInBucket;  // used by the hash table
        size_t   csdHashKey;       // the original hash key
        ssize_t  csdConstDefValue; // When we CSE similar constants, this is the value that we use as the def
        ValueNum csdConstDefVN;    // When we CSE similar constants, this is the ValueNumber that we use for the LclVar
                                   // assignment
        unsigned csdIndex;         // 1..optCSECandidateCount
        bool     csdIsSharedConst; // true if this CSE is a shared const
        bool     csdLiveAcrossCall;

        unsigned short csdDefCount; // definition count
        unsigned short csdUseCount; // use count (excluding the implicit uses at defs)

        weight_t csdDefWtCnt; // weighted def count
        weight_t csdUseWtCnt; // weighted use count (excluding the implicit uses at defs)

        GenTree*    csdTree;  // treenode containing the 1st occurrence
        Statement*  csdStmt;  // stmt containing the 1st occurrence
        BasicBlock* csdBlock; // block containing the 1st occurrence

        treeStmtLst* csdTreeList; // list of matching tree nodes: head
        treeStmtLst* csdTreeLast; // list of matching tree nodes: tail

        // ToDo: This can be removed when gtGetStructHandleIfPresent stops guessing
        // and GT_IND nodes always have valid struct handle.
        //
        CORINFO_CLASS_HANDLE csdStructHnd; // The class handle, currently needed to create a SIMD LclVar in PerformCSE
        bool                 csdStructHndMismatch;

        ValueNum defExcSetPromise; // The exception set that is now required for all defs of this CSE.
                                   // This will be set to NoVN if we decide to abandon this CSE

        ValueNum defExcSetCurrent; // The set of exceptions we currently can use for CSE uses.

        ValueNum defConservNormVN; // if all def occurrences share the same conservative normal value
                                   // number, this will reflect it; otherwise, NoVN.
                                   // not used for shared const CSE's
    };

    static const size_t s_optCSEhashSizeInitial;
    static const size_t s_optCSEhashGrowthFactor;
    static const size_t s_optCSEhashBucketSize;
    size_t              optCSEhashSize;                 // The current size of hashtable
    size_t              optCSEhashCount;                // Number of entries in hashtable
    size_t              optCSEhashMaxCountBeforeResize; // Number of entries before resize
    CSEdsc**            optCSEhash;
    CSEdsc**            optCSEtab;

    typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, GenTree*> NodeToNodeMap;

    NodeToNodeMap* optCseCheckedBoundMap; // Maps bound nodes to ancestor compares that should be
                                          // re-numbered with the bound to improve range check elimination

    // Given a compare, look for a cse candidate checked bound feeding it and add a map entry if found.
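    // (Illustrative sketch: when the compare's operand is a CSE-candidate checked bound -- e.g. an array
    // length feeding a relational compare -- the bound node is used as the key and the compare as the value,
    // so the compare can be re-numbered once the bound is replaced by its CSE temp.)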
    void optCseUpdateCheckedBoundMap(GenTree* compare);

    void optCSEstop();

    CSEdsc* optCSEfindDsc(unsigned index);
    bool optUnmarkCSE(GenTree* tree);

    // user defined callback data for the tree walk function optCSE_MaskHelper()
    struct optCSE_MaskData
    {
        EXPSET_TP CSE_defMask;
        EXPSET_TP CSE_useMask;
    };

    // Treewalk helper for optCSE_DefMask and optCSE_UseMask
    static fgWalkPreFn optCSE_MaskHelper;

    // This function walks all the nodes of a given tree
    // and returns the mask of CSE definitions and uses for the tree
    //
    void optCSE_GetMaskData(GenTree* tree, optCSE_MaskData* pMaskData);

    // Given a binary tree node return true if it is safe to swap the order of evaluation for op1 and op2.
    bool optCSE_canSwap(GenTree* firstNode, GenTree* secondNode);

    struct optCSEcostCmpEx
    {
        bool operator()(const CSEdsc* op1, const CSEdsc* op2);
    };
    struct optCSEcostCmpSz
    {
        bool operator()(const CSEdsc* op1, const CSEdsc* op2);
    };

    void optCleanupCSEs();

#ifdef DEBUG
    void optEnsureClearCSEInfo();
#endif // DEBUG

    static bool Is_Shared_Const_CSE(size_t key)
    {
        return ((key & TARGET_SIGN_BIT) != 0);
    }

    // returns the encoded key
    static size_t Encode_Shared_Const_CSE_Value(size_t key)
    {
        return TARGET_SIGN_BIT | (key >> CSE_CONST_SHARED_LOW_BITS);
    }

    // returns the original key
    static size_t Decode_Shared_Const_CSE_Value(size_t enckey)
    {
        assert(Is_Shared_Const_CSE(enckey));
        return (enckey & ~TARGET_SIGN_BIT) << CSE_CONST_SHARED_LOW_BITS;
    }

    /**************************************************************************
     *                     Value Number based CSEs
     *************************************************************************/

    // String to use for formatting CSE numbers. Note that this is the positive number, e.g., from GET_CSE_INDEX().
#define FMT_CSE "CSE #%02u"

public:
    void optOptimizeValnumCSEs();

protected:
    void     optValnumCSE_Init();
    unsigned optValnumCSE_Index(GenTree* tree, Statement* stmt);
    bool     optValnumCSE_Locate();
    void     optValnumCSE_InitDataFlow();
    void     optValnumCSE_DataFlow();
    void     optValnumCSE_Availablity();
    void     optValnumCSE_Heuristic();

    bool     optDoCSE;             // True when we have found a duplicate CSE tree
    bool     optValnumCSE_phase;   // True when we are executing the optOptimizeValnumCSEs() phase
    unsigned optCSECandidateCount; // Count of CSE candidates
    unsigned optCSEstart;          // The first local variable number that is a CSE
    unsigned optCSEcount;          // The total count of CSE's introduced.
    weight_t optCSEweight;         // The weight of the current block when we are doing PerformCSE

    bool optIsCSEcandidate(GenTree* tree);

    // lclNumIsTrueCSE returns true if the LclVar was introduced by the CSE phase of the compiler
    //
    bool lclNumIsTrueCSE(unsigned lclNum) const
    {
        return ((optCSEcount > 0) && (lclNum >= optCSEstart) && (lclNum < optCSEstart + optCSEcount));
    }

    // lclNumIsCSE returns true if the LclVar should be treated like a CSE with regards to constant prop.
    //
    bool lclNumIsCSE(unsigned lclNum) const
    {
        return lvaGetDesc(lclNum)->lvIsCSE;
    }

#ifdef DEBUG
    bool optConfigDisableCSE();
    bool optConfigDisableCSE2();
#endif

    void optOptimizeCSEs();

    struct isVarAssgDsc
    {
        GenTree*     ivaSkip;
        ALLVARSET_TP ivaMaskVal; // Set of variables assigned to. This is a set of all vars, not tracked vars.
#ifdef DEBUG
        void* ivaSelf;
#endif
        unsigned    ivaVar;            // Variable we are interested in, or -1
        varRefKinds ivaMaskInd;        // What kind of indirect assignments are there?
        callInterf  ivaMaskCall;       // What kind of calls are there?
        bool        ivaMaskIncomplete; // Variables not representable in ivaMaskVal were assigned to.
}; static callInterf optCallInterf(GenTreeCall* call); public: // VN based copy propagation. // In DEBUG builds, we'd like to know the tree that the SSA definition was pushed for. // While for ordinary SSA defs it will be available (as an ASG) in the SSA descriptor, // for locals which will use "definitions from uses", it will not be, so we store it // in this class instead. class CopyPropSsaDef { LclSsaVarDsc* m_ssaDef; #ifdef DEBUG GenTree* m_defNode; #endif public: CopyPropSsaDef(LclSsaVarDsc* ssaDef, GenTree* defNode) : m_ssaDef(ssaDef) #ifdef DEBUG , m_defNode(defNode) #endif { } LclSsaVarDsc* GetSsaDef() const { return m_ssaDef; } #ifdef DEBUG GenTree* GetDefNode() const { return m_defNode; } #endif }; typedef ArrayStack<CopyPropSsaDef> CopyPropSsaDefStack; typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, CopyPropSsaDefStack*> LclNumToLiveDefsMap; // Copy propagation functions. void optCopyProp(Statement* stmt, GenTreeLclVarCommon* tree, unsigned lclNum, LclNumToLiveDefsMap* curSsaName); void optBlockCopyPropPopStacks(BasicBlock* block, LclNumToLiveDefsMap* curSsaName); void optBlockCopyProp(BasicBlock* block, LclNumToLiveDefsMap* curSsaName); void optCopyPropPushDef(GenTree* defNode, GenTreeLclVarCommon* lclNode, unsigned lclNum, LclNumToLiveDefsMap* curSsaName); unsigned optIsSsaLocal(GenTreeLclVarCommon* lclNode); int optCopyProp_LclVarScore(const LclVarDsc* lclVarDsc, const LclVarDsc* copyVarDsc, bool preferOp2); void optVnCopyProp(); INDEBUG(void optDumpCopyPropStack(LclNumToLiveDefsMap* curSsaName)); /************************************************************************** * Early value propagation *************************************************************************/ struct SSAName { unsigned m_lvNum; unsigned m_ssaNum; SSAName(unsigned lvNum, unsigned ssaNum) : m_lvNum(lvNum), m_ssaNum(ssaNum) { } static unsigned GetHashCode(SSAName ssaNm) { return (ssaNm.m_lvNum << 16) | (ssaNm.m_ssaNum); } static bool Equals(SSAName ssaNm1, SSAName ssaNm2) { return (ssaNm1.m_lvNum == ssaNm2.m_lvNum) && (ssaNm1.m_ssaNum == ssaNm2.m_ssaNum); } }; #define OMF_HAS_NEWARRAY 0x00000001 // Method contains 'new' of an array #define OMF_HAS_NEWOBJ 0x00000002 // Method contains 'new' of an object type. #define OMF_HAS_ARRAYREF 0x00000004 // Method contains array element loads or stores. #define OMF_HAS_NULLCHECK 0x00000008 // Method contains null check. #define OMF_HAS_FATPOINTER 0x00000010 // Method contains call, that needs fat pointer transformation. #define OMF_HAS_OBJSTACKALLOC 0x00000020 // Method contains an object allocated on the stack. #define OMF_HAS_GUARDEDDEVIRT 0x00000040 // Method contains guarded devirtualization candidate #define OMF_HAS_EXPRUNTIMELOOKUP 0x00000080 // Method contains a runtime lookup to an expandable dictionary. #define OMF_HAS_PATCHPOINT 0x00000100 // Method contains patchpoints #define OMF_NEEDS_GCPOLLS 0x00000200 // Method needs GC polls #define OMF_HAS_FROZEN_STRING 0x00000400 // Method has a frozen string (REF constant int), currently only on CoreRT. 
#define OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT 0x00000800 // Method contains partial compilation patchpoints #define OMF_HAS_TAILCALL_SUCCESSOR 0x00001000 // Method has potential tail call in a non BBJ_RETURN block bool doesMethodHaveFatPointer() { return (optMethodFlags & OMF_HAS_FATPOINTER) != 0; } void setMethodHasFatPointer() { optMethodFlags |= OMF_HAS_FATPOINTER; } void clearMethodHasFatPointer() { optMethodFlags &= ~OMF_HAS_FATPOINTER; } void addFatPointerCandidate(GenTreeCall* call); bool doesMethodHaveFrozenString() const { return (optMethodFlags & OMF_HAS_FROZEN_STRING) != 0; } void setMethodHasFrozenString() { optMethodFlags |= OMF_HAS_FROZEN_STRING; } bool doesMethodHaveGuardedDevirtualization() const { return (optMethodFlags & OMF_HAS_GUARDEDDEVIRT) != 0; } void setMethodHasGuardedDevirtualization() { optMethodFlags |= OMF_HAS_GUARDEDDEVIRT; } void clearMethodHasGuardedDevirtualization() { optMethodFlags &= ~OMF_HAS_GUARDEDDEVIRT; } void considerGuardedDevirtualization(GenTreeCall* call, IL_OFFSET ilOffset, bool isInterface, CORINFO_METHOD_HANDLE baseMethod, CORINFO_CLASS_HANDLE baseClass, CORINFO_CONTEXT_HANDLE* pContextHandle DEBUGARG(CORINFO_CLASS_HANDLE objClass) DEBUGARG(const char* objClassName)); void addGuardedDevirtualizationCandidate(GenTreeCall* call, CORINFO_METHOD_HANDLE methodHandle, CORINFO_CLASS_HANDLE classHandle, unsigned methodAttr, unsigned classAttr, unsigned likelihood); bool doesMethodHaveExpRuntimeLookup() { return (optMethodFlags & OMF_HAS_EXPRUNTIMELOOKUP) != 0; } void setMethodHasExpRuntimeLookup() { optMethodFlags |= OMF_HAS_EXPRUNTIMELOOKUP; } void clearMethodHasExpRuntimeLookup() { optMethodFlags &= ~OMF_HAS_EXPRUNTIMELOOKUP; } void addExpRuntimeLookupCandidate(GenTreeCall* call); bool doesMethodHavePatchpoints() { return (optMethodFlags & OMF_HAS_PATCHPOINT) != 0; } void setMethodHasPatchpoint() { optMethodFlags |= OMF_HAS_PATCHPOINT; } bool doesMethodHavePartialCompilationPatchpoints() { return (optMethodFlags & OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT) != 0; } void setMethodHasPartialCompilationPatchpoint() { optMethodFlags |= OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT; } unsigned optMethodFlags; bool doesMethodHaveNoReturnCalls() { return optNoReturnCallCount > 0; } void setMethodHasNoReturnCalls() { optNoReturnCallCount++; } unsigned optNoReturnCallCount; // Recursion bound controls how far we can go backwards tracking for a SSA value. // No throughput diff was found with backward walk bound between 3-8. 
static const int optEarlyPropRecurBound = 5; enum class optPropKind { OPK_INVALID, OPK_ARRAYLEN, OPK_NULLCHECK }; typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, GenTree*> LocalNumberToNullCheckTreeMap; GenTree* getArrayLengthFromAllocation(GenTree* tree DEBUGARG(BasicBlock* block)); GenTree* optPropGetValueRec(unsigned lclNum, unsigned ssaNum, optPropKind valueKind, int walkDepth); GenTree* optPropGetValue(unsigned lclNum, unsigned ssaNum, optPropKind valueKind); GenTree* optEarlyPropRewriteTree(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); bool optDoEarlyPropForBlock(BasicBlock* block); bool optDoEarlyPropForFunc(); void optEarlyProp(); void optFoldNullCheck(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); GenTree* optFindNullCheckToFold(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); bool optIsNullCheckFoldingLegal(GenTree* tree, GenTree* nullCheckTree, GenTree** nullCheckParent, Statement** nullCheckStmt); bool optCanMoveNullCheckPastTree(GenTree* tree, unsigned nullCheckLclNum, bool isInsideTry, bool checkSideEffectSummary); #if DEBUG void optCheckFlagsAreSet(unsigned methodFlag, const char* methodFlagStr, unsigned bbFlag, const char* bbFlagStr, GenTree* tree, BasicBlock* basicBlock); #endif // Redundant branch opts // PhaseStatus optRedundantBranches(); bool optRedundantRelop(BasicBlock* const block); bool optRedundantBranch(BasicBlock* const block); bool optJumpThread(BasicBlock* const block, BasicBlock* const domBlock, bool domIsSameRelop); bool optReachable(BasicBlock* const fromBlock, BasicBlock* const toBlock, BasicBlock* const excludedBlock); /************************************************************************** * Value/Assertion propagation *************************************************************************/ public: // Data structures for assertion prop BitVecTraits* apTraits; ASSERT_TP apFull; enum optAssertionKind { OAK_INVALID, OAK_EQUAL, OAK_NOT_EQUAL, OAK_SUBRANGE, OAK_NO_THROW, OAK_COUNT }; enum optOp1Kind { O1K_INVALID, O1K_LCLVAR, O1K_ARR_BND, O1K_BOUND_OPER_BND, O1K_BOUND_LOOP_BND, O1K_CONSTANT_LOOP_BND, O1K_CONSTANT_LOOP_BND_UN, O1K_EXACT_TYPE, O1K_SUBTYPE, O1K_VALUE_NUMBER, O1K_COUNT }; enum optOp2Kind { O2K_INVALID, O2K_LCLVAR_COPY, O2K_IND_CNS_INT, O2K_CONST_INT, O2K_CONST_LONG, O2K_CONST_DOUBLE, O2K_ZEROOBJ, O2K_SUBRANGE, O2K_COUNT }; struct AssertionDsc { optAssertionKind assertionKind; struct SsaVar { unsigned lclNum; // assigned to or property of this local var number unsigned ssaNum; }; struct ArrBnd { ValueNum vnIdx; ValueNum vnLen; }; struct AssertionDscOp1 { optOp1Kind kind; // a normal LclVar, or Exact-type or Subtype ValueNum vn; union { SsaVar lcl; ArrBnd bnd; }; } op1; struct AssertionDscOp2 { optOp2Kind kind; // a const or copy assignment ValueNum vn; struct IntVal { ssize_t iconVal; // integer #if !defined(HOST_64BIT) unsigned padding; // unused; ensures iconFlags does not overlap lconVal #endif GenTreeFlags iconFlags; // gtFlags }; union { struct { SsaVar lcl; FieldSeqNode* zeroOffsetFieldSeq; }; IntVal u1; __int64 lconVal; double dconVal; IntegralRange u2; }; } op2; bool IsCheckedBoundArithBound() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && op1.kind == O1K_BOUND_OPER_BND); } bool IsCheckedBoundBound() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && op1.kind == O1K_BOUND_LOOP_BND); } bool IsConstantBound() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && (op1.kind == O1K_CONSTANT_LOOP_BND)); } 
bool IsConstantBoundUnsigned() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && (op1.kind == O1K_CONSTANT_LOOP_BND_UN)); } bool IsBoundsCheckNoThrow() { return ((assertionKind == OAK_NO_THROW) && (op1.kind == O1K_ARR_BND)); } bool IsCopyAssertion() { return ((assertionKind == OAK_EQUAL) && (op1.kind == O1K_LCLVAR) && (op2.kind == O2K_LCLVAR_COPY)); } bool IsConstantInt32Assertion() { return ((assertionKind == OAK_EQUAL) || (assertionKind == OAK_NOT_EQUAL)) && (op2.kind == O2K_CONST_INT); } static bool SameKind(AssertionDsc* a1, AssertionDsc* a2) { return a1->assertionKind == a2->assertionKind && a1->op1.kind == a2->op1.kind && a1->op2.kind == a2->op2.kind; } static bool ComplementaryKind(optAssertionKind kind, optAssertionKind kind2) { if (kind == OAK_EQUAL) { return kind2 == OAK_NOT_EQUAL; } else if (kind == OAK_NOT_EQUAL) { return kind2 == OAK_EQUAL; } return false; } bool HasSameOp1(AssertionDsc* that, bool vnBased) { if (op1.kind != that->op1.kind) { return false; } else if (op1.kind == O1K_ARR_BND) { assert(vnBased); return (op1.bnd.vnIdx == that->op1.bnd.vnIdx) && (op1.bnd.vnLen == that->op1.bnd.vnLen); } else { return ((vnBased && (op1.vn == that->op1.vn)) || (!vnBased && (op1.lcl.lclNum == that->op1.lcl.lclNum))); } } bool HasSameOp2(AssertionDsc* that, bool vnBased) { if (op2.kind != that->op2.kind) { return false; } switch (op2.kind) { case O2K_IND_CNS_INT: case O2K_CONST_INT: return ((op2.u1.iconVal == that->op2.u1.iconVal) && (op2.u1.iconFlags == that->op2.u1.iconFlags)); case O2K_CONST_LONG: return (op2.lconVal == that->op2.lconVal); case O2K_CONST_DOUBLE: // exact match because of positive and negative zero. return (memcmp(&op2.dconVal, &that->op2.dconVal, sizeof(double)) == 0); case O2K_ZEROOBJ: return true; case O2K_LCLVAR_COPY: return (op2.lcl.lclNum == that->op2.lcl.lclNum) && (!vnBased || op2.lcl.ssaNum == that->op2.lcl.ssaNum) && (op2.zeroOffsetFieldSeq == that->op2.zeroOffsetFieldSeq); case O2K_SUBRANGE: return op2.u2.Equals(that->op2.u2); case O2K_INVALID: // we will return false break; default: assert(!"Unexpected value for op2.kind in AssertionDsc."); break; } return false; } bool Complementary(AssertionDsc* that, bool vnBased) { return ComplementaryKind(assertionKind, that->assertionKind) && HasSameOp1(that, vnBased) && HasSameOp2(that, vnBased); } bool Equals(AssertionDsc* that, bool vnBased) { if (assertionKind != that->assertionKind) { return false; } else if (assertionKind == OAK_NO_THROW) { assert(op2.kind == O2K_INVALID); return HasSameOp1(that, vnBased); } else { return HasSameOp1(that, vnBased) && HasSameOp2(that, vnBased); } } }; protected: static fgWalkPreFn optAddCopiesCallback; static fgWalkPreFn optVNAssertionPropCurStmtVisitor; unsigned optAddCopyLclNum; GenTree* optAddCopyAsgnNode; bool optLocalAssertionProp; // indicates that we are performing local assertion prop bool optAssertionPropagated; // set to true if we modified the trees bool optAssertionPropagatedCurrentStmt; #ifdef DEBUG GenTree* optAssertionPropCurrentTree; #endif AssertionIndex* optComplementaryAssertionMap; JitExpandArray<ASSERT_TP>* optAssertionDep; // table that holds dependent assertions (assertions // using the value of a local var) for each local var AssertionDsc* optAssertionTabPrivate; // table that holds info about value assignments AssertionIndex optAssertionCount; // total number of assertions in the assertion table AssertionIndex optMaxAssertionCount; public: void optVnNonNullPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree); 
fgWalkResult optVNConstantPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree); GenTree* optVNConstantPropOnJTrue(BasicBlock* block, GenTree* test); GenTree* optVNConstantPropOnTree(BasicBlock* block, GenTree* tree); GenTree* optExtractSideEffListFromConst(GenTree* tree); AssertionIndex GetAssertionCount() { return optAssertionCount; } ASSERT_TP* bbJtrueAssertionOut; typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, ASSERT_TP> ValueNumToAssertsMap; ValueNumToAssertsMap* optValueNumToAsserts; // Assertion prop helpers. ASSERT_TP& GetAssertionDep(unsigned lclNum); AssertionDsc* optGetAssertion(AssertionIndex assertIndex); void optAssertionInit(bool isLocalProp); void optAssertionTraitsInit(AssertionIndex assertionCount); void optAssertionReset(AssertionIndex limit); void optAssertionRemove(AssertionIndex index); // Assertion prop data flow functions. void optAssertionPropMain(); Statement* optVNAssertionPropCurStmt(BasicBlock* block, Statement* stmt); bool optIsTreeKnownIntValue(bool vnBased, GenTree* tree, ssize_t* pConstant, GenTreeFlags* pIconFlags); ASSERT_TP* optInitAssertionDataflowFlags(); ASSERT_TP* optComputeAssertionGen(); // Assertion Gen functions. void optAssertionGen(GenTree* tree); AssertionIndex optAssertionGenCast(GenTreeCast* cast); AssertionIndex optAssertionGenPhiDefn(GenTree* tree); AssertionInfo optCreateJTrueBoundsAssertion(GenTree* tree); AssertionInfo optAssertionGenJtrue(GenTree* tree); AssertionIndex optCreateJtrueAssertions(GenTree* op1, GenTree* op2, Compiler::optAssertionKind assertionKind, bool helperCallArgs = false); AssertionIndex optFindComplementary(AssertionIndex assertionIndex); void optMapComplementary(AssertionIndex assertionIndex, AssertionIndex index); // Assertion creation functions. AssertionIndex optCreateAssertion(GenTree* op1, GenTree* op2, optAssertionKind assertionKind, bool helperCallArgs = false); AssertionIndex optFinalizeCreatingAssertion(AssertionDsc* assertion); bool optTryExtractSubrangeAssertion(GenTree* source, IntegralRange* pRange); void optCreateComplementaryAssertion(AssertionIndex assertionIndex, GenTree* op1, GenTree* op2, bool helperCallArgs = false); bool optAssertionVnInvolvesNan(AssertionDsc* assertion); AssertionIndex optAddAssertion(AssertionDsc* assertion); void optAddVnAssertionMapping(ValueNum vn, AssertionIndex index); #ifdef DEBUG void optPrintVnAssertionMapping(); #endif ASSERT_TP optGetVnMappedAssertions(ValueNum vn); // Used for respective assertion propagations. AssertionIndex optAssertionIsSubrange(GenTree* tree, IntegralRange range, ASSERT_VALARG_TP assertions); AssertionIndex optAssertionIsSubtype(GenTree* tree, GenTree* methodTableArg, ASSERT_VALARG_TP assertions); AssertionIndex optAssertionIsNonNullInternal(GenTree* op, ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased)); bool optAssertionIsNonNull(GenTree* op, ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased) DEBUGARG(AssertionIndex* pIndex)); AssertionIndex optGlobalAssertionIsEqualOrNotEqual(ASSERT_VALARG_TP assertions, GenTree* op1, GenTree* op2); AssertionIndex optGlobalAssertionIsEqualOrNotEqualZero(ASSERT_VALARG_TP assertions, GenTree* op1); AssertionIndex optLocalAssertionIsEqualOrNotEqual( optOp1Kind op1Kind, unsigned lclNum, optOp2Kind op2Kind, ssize_t cnsVal, ASSERT_VALARG_TP assertions); // Assertion prop for lcl var functions. 
bool optAssertionProp_LclVarTypeCheck(GenTree* tree, LclVarDsc* lclVarDsc, LclVarDsc* copyVarDsc); GenTree* optCopyAssertionProp(AssertionDsc* curAssertion, GenTreeLclVarCommon* tree, Statement* stmt DEBUGARG(AssertionIndex index)); GenTree* optConstantAssertionProp(AssertionDsc* curAssertion, GenTreeLclVarCommon* tree, Statement* stmt DEBUGARG(AssertionIndex index)); bool optZeroObjAssertionProp(GenTree* tree, ASSERT_VALARG_TP assertions); // Assertion propagation functions. GenTree* optAssertionProp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt, BasicBlock* block); GenTree* optAssertionProp_LclVar(ASSERT_VALARG_TP assertions, GenTreeLclVarCommon* tree, Statement* stmt); GenTree* optAssertionProp_Asg(ASSERT_VALARG_TP assertions, GenTreeOp* asg, Statement* stmt); GenTree* optAssertionProp_Return(ASSERT_VALARG_TP assertions, GenTreeUnOp* ret, Statement* stmt); GenTree* optAssertionProp_Ind(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_Cast(ASSERT_VALARG_TP assertions, GenTreeCast* cast, Statement* stmt); GenTree* optAssertionProp_Call(ASSERT_VALARG_TP assertions, GenTreeCall* call, Statement* stmt); GenTree* optAssertionProp_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_Comma(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_BndsChk(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionPropGlobal_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionPropLocal_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_Update(GenTree* newTree, GenTree* tree, Statement* stmt); GenTree* optNonNullAssertionProp_Call(ASSERT_VALARG_TP assertions, GenTreeCall* call); // Implied assertion functions. 
void optImpliedAssertions(AssertionIndex assertionIndex, ASSERT_TP& activeAssertions); void optImpliedByTypeOfAssertions(ASSERT_TP& activeAssertions); void optImpliedByCopyAssertion(AssertionDsc* copyAssertion, AssertionDsc* depAssertion, ASSERT_TP& result); void optImpliedByConstAssertion(AssertionDsc* curAssertion, ASSERT_TP& result); #ifdef DEBUG void optPrintAssertion(AssertionDsc* newAssertion, AssertionIndex assertionIndex = 0); void optPrintAssertionIndex(AssertionIndex index); void optPrintAssertionIndices(ASSERT_TP assertions); void optDebugCheckAssertion(AssertionDsc* assertion); void optDebugCheckAssertions(AssertionIndex AssertionIndex); #endif static void optDumpAssertionIndices(const char* header, ASSERT_TP assertions, const char* footer = nullptr); static void optDumpAssertionIndices(ASSERT_TP assertions, const char* footer = nullptr); void optAddCopies(); /************************************************************************** * Range checks *************************************************************************/ public: struct LoopCloneVisitorInfo { LoopCloneContext* context; unsigned loopNum; Statement* stmt; LoopCloneVisitorInfo(LoopCloneContext* context, unsigned loopNum, Statement* stmt) : context(context), loopNum(loopNum), stmt(nullptr) { } }; bool optIsStackLocalInvariant(unsigned loopNum, unsigned lclNum); bool optExtractArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum); bool optReconstructArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum); bool optIdentifyLoopOptInfo(unsigned loopNum, LoopCloneContext* context); static fgWalkPreFn optCanOptimizeByLoopCloningVisitor; fgWalkResult optCanOptimizeByLoopCloning(GenTree* tree, LoopCloneVisitorInfo* info); bool optObtainLoopCloningOpts(LoopCloneContext* context); bool optIsLoopClonable(unsigned loopInd); bool optLoopCloningEnabled(); #ifdef DEBUG void optDebugLogLoopCloning(BasicBlock* block, Statement* insertBefore); #endif void optPerformStaticOptimizations(unsigned loopNum, LoopCloneContext* context DEBUGARG(bool fastPath)); bool optComputeDerefConditions(unsigned loopNum, LoopCloneContext* context); bool optDeriveLoopCloningConditions(unsigned loopNum, LoopCloneContext* context); BasicBlock* optInsertLoopChoiceConditions(LoopCloneContext* context, unsigned loopNum, BasicBlock* slowHead, BasicBlock* insertAfter); protected: ssize_t optGetArrayRefScaleAndIndex(GenTree* mul, GenTree** pIndex DEBUGARG(bool bRngChk)); bool optReachWithoutCall(BasicBlock* srcBB, BasicBlock* dstBB); protected: bool optLoopsMarked; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX RegAlloc XX XX XX XX Does the register allocation and puts the remaining lclVars on the stack XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: regNumber raUpdateRegStateForArg(RegState* regState, LclVarDsc* argDsc); void raMarkStkVars(); #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE #if defined(TARGET_AMD64) static bool varTypeNeedsPartialCalleeSave(var_types type) { assert(type != TYP_STRUCT); return (type == TYP_SIMD32); } #elif defined(TARGET_ARM64) static bool varTypeNeedsPartialCalleeSave(var_types type) { assert(type != TYP_STRUCT); // ARM64 ABI FP Callee save registers only require Callee to save lower 8 Bytes // For SIMD types longer than 8 bytes Caller is responsible for saving and restoring 
Upper bytes. return ((type == TYP_SIMD16) || (type == TYP_SIMD12)); } #else // !defined(TARGET_AMD64) && !defined(TARGET_ARM64) #error("Unknown target architecture for FEATURE_SIMD") #endif // !defined(TARGET_AMD64) && !defined(TARGET_ARM64) #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE protected: // Some things are used by both LSRA and regpredict allocators. FrameType rpFrameType; bool rpMustCreateEBPCalled; // Set to true after we have called rpMustCreateEBPFrame once bool rpMustCreateEBPFrame(INDEBUG(const char** wbReason)); private: Lowering* m_pLowering; // Lowering; needed to Lower IR that's added or modified after Lowering. LinearScanInterface* m_pLinearScan; // Linear Scan allocator /* raIsVarargsStackArg is called by raMaskStkVars and by lvaComputeRefCounts. It identifies the special case where a varargs function has a parameter passed on the stack, other than the special varargs handle. Such parameters require special treatment, because they cannot be tracked by the GC (their offsets in the stack are not known at compile time). */ bool raIsVarargsStackArg(unsigned lclNum) { #ifdef TARGET_X86 LclVarDsc* varDsc = lvaGetDesc(lclNum); assert(varDsc->lvIsParam); return (info.compIsVarArgs && !varDsc->lvIsRegArg && (lclNum != lvaVarargsHandleArg)); #else // TARGET_X86 return false; #endif // TARGET_X86 } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX EEInterface XX XX XX XX Get to the class and method info from the Execution Engine given XX XX tokens for the class and method XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: // Get handles void eeGetCallInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedToken, CORINFO_CALLINFO_FLAGS flags, CORINFO_CALL_INFO* pResult); void eeGetFieldInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS flags, CORINFO_FIELD_INFO* pResult); // Get the flags bool eeIsValueClass(CORINFO_CLASS_HANDLE clsHnd); bool eeIsIntrinsic(CORINFO_METHOD_HANDLE ftn); bool eeIsFieldStatic(CORINFO_FIELD_HANDLE fldHnd); var_types eeGetFieldType(CORINFO_FIELD_HANDLE fldHnd, CORINFO_CLASS_HANDLE* pStructHnd = nullptr); #if defined(DEBUG) || defined(FEATURE_JIT_METHOD_PERF) || defined(FEATURE_SIMD) || defined(TRACK_LSRA_STATS) const char* eeGetMethodName(CORINFO_METHOD_HANDLE hnd, const char** className); const char* eeGetMethodFullName(CORINFO_METHOD_HANDLE hnd); unsigned compMethodHash(CORINFO_METHOD_HANDLE methodHandle); bool eeIsNativeMethod(CORINFO_METHOD_HANDLE method); CORINFO_METHOD_HANDLE eeGetMethodHandleForNative(CORINFO_METHOD_HANDLE method); #endif var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig); var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig, bool* isPinned); CORINFO_CLASS_HANDLE eeGetArgClass(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE list); CORINFO_CLASS_HANDLE eeGetClassFromContext(CORINFO_CONTEXT_HANDLE context); unsigned eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig); static unsigned eeGetArgSizeAlignment(var_types type, bool isFloatHfa); // VOM info, method sigs void eeGetSig(unsigned sigTok, CORINFO_MODULE_HANDLE scope, CORINFO_CONTEXT_HANDLE context, CORINFO_SIG_INFO* retSig); void eeGetCallSiteSig(unsigned sigTok, CORINFO_MODULE_HANDLE scope, CORINFO_CONTEXT_HANDLE context, 
CORINFO_SIG_INFO* retSig); void eeGetMethodSig(CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* retSig, CORINFO_CLASS_HANDLE owner = nullptr); // Method entry-points, instrs CORINFO_METHOD_HANDLE eeMarkNativeTarget(CORINFO_METHOD_HANDLE method); CORINFO_EE_INFO eeInfo; bool eeInfoInitialized; CORINFO_EE_INFO* eeGetEEInfo(); // Gets the offset of a SDArray's first element static unsigned eeGetArrayDataOffset(); // Get the offset of a MDArray's first element static unsigned eeGetMDArrayDataOffset(unsigned rank); // Get the offset of a MDArray's dimension length for a given dimension. static unsigned eeGetMDArrayLengthOffset(unsigned rank, unsigned dimension); // Get the offset of a MDArray's lower bound for a given dimension. static unsigned eeGetMDArrayLowerBoundOffset(unsigned rank, unsigned dimension); GenTree* eeGetPInvokeCookie(CORINFO_SIG_INFO* szMetaSig); // Returns the page size for the target machine as reported by the EE. target_size_t eeGetPageSize() { return (target_size_t)eeGetEEInfo()->osPageSize; } //------------------------------------------------------------------------ // VirtualStubParam: virtual stub dispatch extra parameter (slot address). // // It represents Abi and target specific registers for the parameter. // class VirtualStubParamInfo { public: VirtualStubParamInfo(bool isCoreRTABI) { #if defined(TARGET_X86) reg = REG_EAX; regMask = RBM_EAX; #elif defined(TARGET_AMD64) if (isCoreRTABI) { reg = REG_R10; regMask = RBM_R10; } else { reg = REG_R11; regMask = RBM_R11; } #elif defined(TARGET_ARM) if (isCoreRTABI) { reg = REG_R12; regMask = RBM_R12; } else { reg = REG_R4; regMask = RBM_R4; } #elif defined(TARGET_ARM64) reg = REG_R11; regMask = RBM_R11; #else #error Unsupported or unset target architecture #endif } regNumber GetReg() const { return reg; } _regMask_enum GetRegMask() const { return regMask; } private: regNumber reg; _regMask_enum regMask; }; VirtualStubParamInfo* virtualStubParamInfo; bool IsTargetAbi(CORINFO_RUNTIME_ABI abi) { return eeGetEEInfo()->targetAbi == abi; } bool generateCFIUnwindCodes() { #if defined(FEATURE_CFI_SUPPORT) return TargetOS::IsUnix && IsTargetAbi(CORINFO_CORERT_ABI); #else return false; #endif } // Debugging support - Line number info void eeGetStmtOffsets(); unsigned eeBoundariesCount; ICorDebugInfo::OffsetMapping* eeBoundaries; // Boundaries to report to the EE void eeSetLIcount(unsigned count); void eeSetLIinfo(unsigned which, UNATIVE_OFFSET offs, IPmappingDscKind kind, const ILLocation& loc); void eeSetLIdone(); #ifdef DEBUG static void eeDispILOffs(IL_OFFSET offs); static void eeDispSourceMappingOffs(uint32_t offs); static void eeDispLineInfo(const ICorDebugInfo::OffsetMapping* line); void eeDispLineInfos(); #endif // DEBUG // Debugging support - Local var info void eeGetVars(); unsigned eeVarsCount; struct VarResultInfo { UNATIVE_OFFSET startOffset; UNATIVE_OFFSET endOffset; DWORD varNumber; CodeGenInterface::siVarLoc loc; } * eeVars; void eeSetLVcount(unsigned count); void eeSetLVinfo(unsigned which, UNATIVE_OFFSET startOffs, UNATIVE_OFFSET length, unsigned varNum, const CodeGenInterface::siVarLoc& loc); void eeSetLVdone(); #ifdef DEBUG void eeDispVar(ICorDebugInfo::NativeVarInfo* var); void eeDispVars(CORINFO_METHOD_HANDLE ftn, ULONG32 cVars, ICorDebugInfo::NativeVarInfo* vars); #endif // DEBUG // ICorJitInfo wrappers void eeReserveUnwindInfo(bool isFunclet, bool isColdCode, ULONG unwindSize); void eeAllocUnwindInfo(BYTE* pHotCode, BYTE* pColdCode, ULONG startOffset, ULONG endOffset, ULONG unwindSize, BYTE* pUnwindBlock, 
CorJitFuncKind funcKind); void eeSetEHcount(unsigned cEH); void eeSetEHinfo(unsigned EHnumber, const CORINFO_EH_CLAUSE* clause); WORD eeGetRelocTypeHint(void* target); // ICorStaticInfo wrapper functions bool eeTryResolveToken(CORINFO_RESOLVED_TOKEN* resolvedToken); #if defined(UNIX_AMD64_ABI) #ifdef DEBUG static void dumpSystemVClassificationType(SystemVClassificationType ct); #endif // DEBUG void eeGetSystemVAmd64PassStructInRegisterDescriptor( /*IN*/ CORINFO_CLASS_HANDLE structHnd, /*OUT*/ SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structPassInRegDescPtr); #endif // UNIX_AMD64_ABI template <typename ParamType> bool eeRunWithErrorTrap(void (*function)(ParamType*), ParamType* param) { return eeRunWithErrorTrapImp(reinterpret_cast<void (*)(void*)>(function), reinterpret_cast<void*>(param)); } bool eeRunWithErrorTrapImp(void (*function)(void*), void* param); template <typename ParamType> bool eeRunWithSPMIErrorTrap(void (*function)(ParamType*), ParamType* param) { return eeRunWithSPMIErrorTrapImp(reinterpret_cast<void (*)(void*)>(function), reinterpret_cast<void*>(param)); } bool eeRunWithSPMIErrorTrapImp(void (*function)(void*), void* param); // Utility functions const char* eeGetFieldName(CORINFO_FIELD_HANDLE fieldHnd, const char** classNamePtr = nullptr); #if defined(DEBUG) const WCHAR* eeGetCPString(size_t stringHandle); #endif const char* eeGetClassName(CORINFO_CLASS_HANDLE clsHnd); static CORINFO_METHOD_HANDLE eeFindHelper(unsigned helper); static CorInfoHelpFunc eeGetHelperNum(CORINFO_METHOD_HANDLE method); static bool IsSharedStaticHelper(GenTree* tree); static bool IsGcSafePoint(GenTreeCall* call); static CORINFO_FIELD_HANDLE eeFindJitDataOffs(unsigned jitDataOffs); // returns true/false if 'field' is a Jit Data offset static bool eeIsJitDataOffs(CORINFO_FIELD_HANDLE field); // returns a number < 0 if 'field' is not a Jit Data offset, otherwise the data offset (limited to 2GB) static int eeGetJitDataOffs(CORINFO_FIELD_HANDLE field); /*****************************************************************************/ /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX CodeGenerator XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: CodeGenInterface* codeGen; // Record the instr offset mapping to the generated code jitstd::list<IPmappingDsc> genIPmappings; #ifdef DEBUG jitstd::list<PreciseIPMapping> genPreciseIPmappings; #endif // Managed RetVal - A side hash table meant to record the mapping from a // GT_CALL node to its debug info. This info is used to emit sequence points // that can be used by debugger to determine the native offset at which the // managed RetVal will be available. // // In fact we can store debug info in a GT_CALL node. This was ruled out in // favor of a side table for two reasons: 1) We need debug info for only those // GT_CALL nodes (created during importation) that correspond to an IL call and // whose return type is other than TYP_VOID. 2) GT_CALL node is a frequently used // structure and IL offset is needed only when generating debuggable code. Therefore // it is desirable to avoid memory size penalty in retail scenarios. 
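    // (Usage sketch, assuming the map has been allocated: the importer records the mapping for a non-void
    // IL call with genCallSite2DebugInfoMap->Set(call, di), and codegen later calls Lookup on the same
    // GT_CALL node to emit the corresponding sequence point.)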
    typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, DebugInfo> CallSiteDebugInfoTable;
    CallSiteDebugInfoTable* genCallSite2DebugInfoMap;

    unsigned    genReturnLocal; // Local number for the return value when applicable.
    BasicBlock* genReturnBB;    // jumped to when not optimizing for speed.

    // The following properties are part of CodeGenContext. Getters are provided here for
    // convenience and backward compatibility, but the properties can only be set by invoking
    // the setter on CodeGenContext directly.

    emitter* GetEmitter() const
    {
        return codeGen->GetEmitter();
    }

    bool isFramePointerUsed() const
    {
        return codeGen->isFramePointerUsed();
    }

    bool GetInterruptible()
    {
        return codeGen->GetInterruptible();
    }
    void SetInterruptible(bool value)
    {
        codeGen->SetInterruptible(value);
    }

#if DOUBLE_ALIGN
    const bool genDoubleAlign()
    {
        return codeGen->doDoubleAlign();
    }
    DWORD getCanDoubleAlign();
    bool shouldDoubleAlign(unsigned refCntStk,
                           unsigned refCntReg,
                           weight_t refCntWtdReg,
                           unsigned refCntStkParam,
                           weight_t refCntWtdStkDbl);
#endif // DOUBLE_ALIGN

    bool IsFullPtrRegMapRequired()
    {
        return codeGen->IsFullPtrRegMapRequired();
    }
    void SetFullPtrRegMapRequired(bool value)
    {
        codeGen->SetFullPtrRegMapRequired(value);
    }

    // Things that MAY belong either in CodeGen or CodeGenContext

#if defined(FEATURE_EH_FUNCLETS)
    FuncInfoDsc*   compFuncInfos;
    unsigned short compCurrFuncIdx;
    unsigned short compFuncInfoCount;

    unsigned short compFuncCount()
    {
        assert(fgFuncletsCreated);
        return compFuncInfoCount;
    }

#else // !FEATURE_EH_FUNCLETS

    // This is a no-op when there are no funclets!
    void genUpdateCurrentFunclet(BasicBlock* block)
    {
        return;
    }

    FuncInfoDsc compFuncInfoRoot;

    static const unsigned compCurrFuncIdx = 0;

    unsigned short compFuncCount()
    {
        return 1;
    }

#endif // !FEATURE_EH_FUNCLETS

    FuncInfoDsc* funCurrentFunc();
    void funSetCurrentFunc(unsigned funcIdx);
    FuncInfoDsc* funGetFunc(unsigned funcIdx);
    unsigned int funGetFuncIdx(BasicBlock* block);

    // LIVENESS

    VARSET_TP compCurLife;     // current live variables
    GenTree*  compCurLifeTree; // node after which compCurLife has been computed

    // Compare the given "newLife" with last set of live variables and update
    // codeGen "gcInfo", siScopes, "regSet" with the new variable's homes/liveness.
    template <bool ForCodeGen>
    void compChangeLife(VARSET_VALARG_TP newLife);

    // Update the GC's masks, registers' masks and report changes to variables' homes, given a set of
    // current live variables, if changes have happened since "compCurLife".
    template <bool ForCodeGen>
    inline void compUpdateLife(VARSET_VALARG_TP newLife);

    // Gets a register mask that represents the kill set for a helper call since
    // not all JIT Helper calls follow the standard ABI on the target architecture.
    regMaskTP compHelperCallKillSet(CorInfoHelpFunc helper);

#ifdef TARGET_ARM
    // Requires that "varDsc" be a promoted struct local variable being passed as an argument, beginning at
    // "firstArgRegNum", which is assumed to have already been aligned to the register alignment restriction of the
    // struct type. Adds bits to "*pArgSkippedRegMask" for any argument registers *not* used in passing "varDsc" --
    // i.e., internal "holes" caused by internal alignment constraints. For example, if the struct contained an int and
    // a double, and we are at R0 (on ARM), then R1 would be skipped, and the bit for R1 would be added to the mask.
    void fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc, unsigned firstArgRegNum, regMaskTP* pArgSkippedRegMask);
#endif // TARGET_ARM

    // If "tree" is an indirection (GT_IND, or GT_OBJ) whose arg is an ADDR, whose arg is a LCL_VAR, return that
    // LCL_VAR node, else NULL.
    static GenTreeLclVar* fgIsIndirOfAddrOfLocal(GenTree* tree);

    // This map is indexed by GT_OBJ nodes that are the address of promoted struct variables, which
    // have been annotated with the GTF_VAR_DEATH flag. If such a node is *not* mapped in this
    // table, one may assume that all the (tracked) field vars die at this GT_OBJ. Otherwise,
    // the node maps to a pointer to a VARSET_TP, containing set bits for each of the tracked field
    // vars of the promoted struct local that go dead at the given node (the set bits are the bits
    // for the tracked var indices of the field vars, as in a live var set).
    //
    // The map is allocated on demand so all map operations should use one of the following three
    // wrapper methods.

    NodeToVarsetPtrMap* m_promotedStructDeathVars;

    NodeToVarsetPtrMap* GetPromotedStructDeathVars()
    {
        if (m_promotedStructDeathVars == nullptr)
        {
            m_promotedStructDeathVars = new (getAllocator()) NodeToVarsetPtrMap(getAllocator());
        }
        return m_promotedStructDeathVars;
    }

    void ClearPromotedStructDeathVars()
    {
        if (m_promotedStructDeathVars != nullptr)
        {
            m_promotedStructDeathVars->RemoveAll();
        }
    }

    bool LookupPromotedStructDeathVars(GenTree* tree, VARSET_TP** bits)
    {
        *bits       = nullptr;
        bool result = false;

        if (m_promotedStructDeathVars != nullptr)
        {
            result = m_promotedStructDeathVars->Lookup(tree, bits);
        }

        return result;
    }

/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                           UnwindInfo                                      XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

#if !defined(__GNUC__)
#pragma region Unwind information
#endif

public:
    //
    // Infrastructure functions: start/stop/reserve/emit.
    //

    void unwindBegProlog();
    void unwindEndProlog();
    void unwindBegEpilog();
    void unwindEndEpilog();
    void unwindReserve();
    void unwindEmit(void* pHotCode, void* pColdCode);

    //
    // Specific unwind information functions: called by code generation to indicate a particular
    // prolog or epilog unwindable instruction has been generated.
    //

    void unwindPush(regNumber reg);
    void unwindAllocStack(unsigned size);
    void unwindSetFrameReg(regNumber reg, unsigned offset);
    void unwindSaveReg(regNumber reg, unsigned offset);

#if defined(TARGET_ARM)
    void unwindPushMaskInt(regMaskTP mask);
    void unwindPushMaskFloat(regMaskTP mask);
    void unwindPopMaskInt(regMaskTP mask);
    void unwindPopMaskFloat(regMaskTP mask);
    void unwindBranch16();                    // The epilog terminates with a 16-bit branch (e.g., "bx lr")
    void unwindNop(unsigned codeSizeInBytes); // Generate unwind NOP code. 'codeSizeInBytes' is 2 or 4 bytes. Only
                                              // called via unwindPadding().
    void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last
                          // instruction and the current location.
#endif // TARGET_ARM

#if defined(TARGET_ARM64)
    void unwindNop();
    void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last
                          // instruction and the current location.
    void unwindSaveReg(regNumber reg, int offset);           // str reg, [sp, #offset]
    void unwindSaveRegPreindexed(regNumber reg, int offset); // str reg, [sp, #offset]!
void unwindSaveRegPair(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset] void unwindSaveRegPairPreindexed(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset]! void unwindSaveNext(); // unwind code: save_next void unwindReturn(regNumber reg); // ret lr #endif // defined(TARGET_ARM64) // // Private "helper" functions for the unwind implementation. // private: #if defined(FEATURE_EH_FUNCLETS) void unwindGetFuncLocations(FuncInfoDsc* func, bool getHotSectionData, /* OUT */ emitLocation** ppStartLoc, /* OUT */ emitLocation** ppEndLoc); #endif // FEATURE_EH_FUNCLETS void unwindReserveFunc(FuncInfoDsc* func); void unwindEmitFunc(FuncInfoDsc* func, void* pHotCode, void* pColdCode); #if defined(TARGET_AMD64) || (defined(TARGET_X86) && defined(FEATURE_EH_FUNCLETS)) void unwindReserveFuncHelper(FuncInfoDsc* func, bool isHotCode); void unwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode, void* pColdCode, bool isHotCode); #endif // TARGET_AMD64 || (TARGET_X86 && FEATURE_EH_FUNCLETS) UNATIVE_OFFSET unwindGetCurrentOffset(FuncInfoDsc* func); #if defined(TARGET_AMD64) void unwindBegPrologWindows(); void unwindPushWindows(regNumber reg); void unwindAllocStackWindows(unsigned size); void unwindSetFrameRegWindows(regNumber reg, unsigned offset); void unwindSaveRegWindows(regNumber reg, unsigned offset); #ifdef UNIX_AMD64_ABI void unwindSaveRegCFI(regNumber reg, unsigned offset); #endif // UNIX_AMD64_ABI #elif defined(TARGET_ARM) void unwindPushPopMaskInt(regMaskTP mask, bool useOpsize16); void unwindPushPopMaskFloat(regMaskTP mask); #endif // TARGET_ARM #if defined(FEATURE_CFI_SUPPORT) short mapRegNumToDwarfReg(regNumber reg); void createCfiCode(FuncInfoDsc* func, UNATIVE_OFFSET codeOffset, UCHAR opcode, short dwarfReg, INT offset = 0); void unwindPushPopCFI(regNumber reg); void unwindBegPrologCFI(); void unwindPushPopMaskCFI(regMaskTP regMask, bool isFloat); void unwindAllocStackCFI(unsigned size); void unwindSetFrameRegCFI(regNumber reg, unsigned offset); void unwindEmitFuncCFI(FuncInfoDsc* func, void* pHotCode, void* pColdCode); #ifdef DEBUG void DumpCfiInfo(bool isHotCode, UNATIVE_OFFSET startOffset, UNATIVE_OFFSET endOffset, DWORD cfiCodeBytes, const CFI_CODE* const pCfiCode); #endif #endif // FEATURE_CFI_SUPPORT #if !defined(__GNUC__) #pragma endregion // Note: region is NOT under !defined(__GNUC__) #endif /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX SIMD XX XX XX XX Info about SIMD types, methods and the SIMD assembly (i.e. the assembly XX XX that contains the distinguished, well-known SIMD type definitions). 
XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ bool IsBaselineSimdIsaSupported() { #ifdef FEATURE_SIMD #if defined(TARGET_XARCH) CORINFO_InstructionSet minimumIsa = InstructionSet_SSE2; #elif defined(TARGET_ARM64) CORINFO_InstructionSet minimumIsa = InstructionSet_AdvSimd; #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 return compOpportunisticallyDependsOn(minimumIsa); #else return false; #endif } #if defined(DEBUG) bool IsBaselineSimdIsaSupportedDebugOnly() { #ifdef FEATURE_SIMD #if defined(TARGET_XARCH) CORINFO_InstructionSet minimumIsa = InstructionSet_SSE2; #elif defined(TARGET_ARM64) CORINFO_InstructionSet minimumIsa = InstructionSet_AdvSimd; #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 return compIsaSupportedDebugOnly(minimumIsa); #else return false; #endif // FEATURE_SIMD } #endif // DEBUG // Get highest available level for SIMD codegen SIMDLevel getSIMDSupportLevel() { #if defined(TARGET_XARCH) if (compOpportunisticallyDependsOn(InstructionSet_AVX2)) { return SIMD_AVX2_Supported; } if (compOpportunisticallyDependsOn(InstructionSet_SSE42)) { return SIMD_SSE4_Supported; } // min bar is SSE2 return SIMD_SSE2_Supported; #else assert(!"Available instruction set(s) for SIMD codegen is not defined for target arch"); unreached(); return SIMD_Not_Supported; #endif } bool isIntrinsicType(CORINFO_CLASS_HANDLE clsHnd) { return info.compCompHnd->isIntrinsicType(clsHnd); } const char* getClassNameFromMetadata(CORINFO_CLASS_HANDLE cls, const char** namespaceName) { return info.compCompHnd->getClassNameFromMetadata(cls, namespaceName); } CORINFO_CLASS_HANDLE getTypeInstantiationArgument(CORINFO_CLASS_HANDLE cls, unsigned index) { return info.compCompHnd->getTypeInstantiationArgument(cls, index); } #ifdef FEATURE_SIMD // Have we identified any SIMD types? // This is currently used by struct promotion to avoid getting type information for a struct // field to see if it is a SIMD type, if we haven't seen any SIMD types or operations in // the method. bool _usesSIMDTypes; bool usesSIMDTypes() { return _usesSIMDTypes; } void setUsesSIMDTypes(bool value) { _usesSIMDTypes = value; } // This is a temp lclVar allocated on the stack as TYP_SIMD. It is used to implement intrinsics // that require indexed access to the individual fields of the vector, which is not well supported // by the hardware. It is allocated when/if such situations are encountered during Lowering. 
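    // (A typical example is extracting a vector element at an index that is not a constant: the vector is
    // spilled to this stack temp and the element is then loaded from it with an indexed access.)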
unsigned lvaSIMDInitTempVarNum; struct SIMDHandlesCache { // SIMD Types CORINFO_CLASS_HANDLE SIMDFloatHandle; CORINFO_CLASS_HANDLE SIMDDoubleHandle; CORINFO_CLASS_HANDLE SIMDIntHandle; CORINFO_CLASS_HANDLE SIMDUShortHandle; CORINFO_CLASS_HANDLE SIMDUByteHandle; CORINFO_CLASS_HANDLE SIMDShortHandle; CORINFO_CLASS_HANDLE SIMDByteHandle; CORINFO_CLASS_HANDLE SIMDLongHandle; CORINFO_CLASS_HANDLE SIMDUIntHandle; CORINFO_CLASS_HANDLE SIMDULongHandle; CORINFO_CLASS_HANDLE SIMDNIntHandle; CORINFO_CLASS_HANDLE SIMDNUIntHandle; CORINFO_CLASS_HANDLE SIMDVector2Handle; CORINFO_CLASS_HANDLE SIMDVector3Handle; CORINFO_CLASS_HANDLE SIMDVector4Handle; CORINFO_CLASS_HANDLE SIMDVectorHandle; #ifdef FEATURE_HW_INTRINSICS #if defined(TARGET_ARM64) CORINFO_CLASS_HANDLE Vector64FloatHandle; CORINFO_CLASS_HANDLE Vector64DoubleHandle; CORINFO_CLASS_HANDLE Vector64IntHandle; CORINFO_CLASS_HANDLE Vector64UShortHandle; CORINFO_CLASS_HANDLE Vector64UByteHandle; CORINFO_CLASS_HANDLE Vector64ShortHandle; CORINFO_CLASS_HANDLE Vector64ByteHandle; CORINFO_CLASS_HANDLE Vector64LongHandle; CORINFO_CLASS_HANDLE Vector64UIntHandle; CORINFO_CLASS_HANDLE Vector64ULongHandle; CORINFO_CLASS_HANDLE Vector64NIntHandle; CORINFO_CLASS_HANDLE Vector64NUIntHandle; #endif // defined(TARGET_ARM64) CORINFO_CLASS_HANDLE Vector128FloatHandle; CORINFO_CLASS_HANDLE Vector128DoubleHandle; CORINFO_CLASS_HANDLE Vector128IntHandle; CORINFO_CLASS_HANDLE Vector128UShortHandle; CORINFO_CLASS_HANDLE Vector128UByteHandle; CORINFO_CLASS_HANDLE Vector128ShortHandle; CORINFO_CLASS_HANDLE Vector128ByteHandle; CORINFO_CLASS_HANDLE Vector128LongHandle; CORINFO_CLASS_HANDLE Vector128UIntHandle; CORINFO_CLASS_HANDLE Vector128ULongHandle; CORINFO_CLASS_HANDLE Vector128NIntHandle; CORINFO_CLASS_HANDLE Vector128NUIntHandle; #if defined(TARGET_XARCH) CORINFO_CLASS_HANDLE Vector256FloatHandle; CORINFO_CLASS_HANDLE Vector256DoubleHandle; CORINFO_CLASS_HANDLE Vector256IntHandle; CORINFO_CLASS_HANDLE Vector256UShortHandle; CORINFO_CLASS_HANDLE Vector256UByteHandle; CORINFO_CLASS_HANDLE Vector256ShortHandle; CORINFO_CLASS_HANDLE Vector256ByteHandle; CORINFO_CLASS_HANDLE Vector256LongHandle; CORINFO_CLASS_HANDLE Vector256UIntHandle; CORINFO_CLASS_HANDLE Vector256ULongHandle; CORINFO_CLASS_HANDLE Vector256NIntHandle; CORINFO_CLASS_HANDLE Vector256NUIntHandle; #endif // defined(TARGET_XARCH) #endif // FEATURE_HW_INTRINSICS SIMDHandlesCache() { memset(this, 0, sizeof(*this)); } }; SIMDHandlesCache* m_simdHandleCache; // Get an appropriate "zero" for the given type and class handle. GenTree* gtGetSIMDZero(var_types simdType, CorInfoType simdBaseJitType, CORINFO_CLASS_HANDLE simdHandle); // Get the handle for a SIMD type. CORINFO_CLASS_HANDLE gtGetStructHandleForSIMD(var_types simdType, CorInfoType simdBaseJitType) { if (m_simdHandleCache == nullptr) { // This may happen if the JIT generates SIMD node on its own, without importing them. // Otherwise getBaseJitTypeAndSizeOfSIMDType should have created the cache. 
            return NO_CLASS_HANDLE;
        }

        if (simdBaseJitType == CORINFO_TYPE_FLOAT)
        {
            switch (simdType)
            {
                case TYP_SIMD8:
                    return m_simdHandleCache->SIMDVector2Handle;
                case TYP_SIMD12:
                    return m_simdHandleCache->SIMDVector3Handle;
                case TYP_SIMD16:
                    if ((getSIMDVectorType() == TYP_SIMD32) ||
                        (m_simdHandleCache->SIMDVector4Handle != NO_CLASS_HANDLE))
                    {
                        return m_simdHandleCache->SIMDVector4Handle;
                    }
                    break;
                case TYP_SIMD32:
                    break;
                default:
                    unreached();
            }
        }
        assert(emitTypeSize(simdType) <= largestEnregisterableStructSize());
        switch (simdBaseJitType)
        {
            case CORINFO_TYPE_FLOAT:
                return m_simdHandleCache->SIMDFloatHandle;
            case CORINFO_TYPE_DOUBLE:
                return m_simdHandleCache->SIMDDoubleHandle;
            case CORINFO_TYPE_INT:
                return m_simdHandleCache->SIMDIntHandle;
            case CORINFO_TYPE_USHORT:
                return m_simdHandleCache->SIMDUShortHandle;
            case CORINFO_TYPE_UBYTE:
                return m_simdHandleCache->SIMDUByteHandle;
            case CORINFO_TYPE_SHORT:
                return m_simdHandleCache->SIMDShortHandle;
            case CORINFO_TYPE_BYTE:
                return m_simdHandleCache->SIMDByteHandle;
            case CORINFO_TYPE_LONG:
                return m_simdHandleCache->SIMDLongHandle;
            case CORINFO_TYPE_UINT:
                return m_simdHandleCache->SIMDUIntHandle;
            case CORINFO_TYPE_ULONG:
                return m_simdHandleCache->SIMDULongHandle;
            case CORINFO_TYPE_NATIVEINT:
                return m_simdHandleCache->SIMDNIntHandle;
            case CORINFO_TYPE_NATIVEUINT:
                return m_simdHandleCache->SIMDNUIntHandle;
            default:
                assert(!"Didn't find a class handle for simdType");
        }

        return NO_CLASS_HANDLE;
    }

    // Returns true if this is a SIMD type that should be considered an opaque
    // vector type (i.e. do not analyze or promote its fields).
    // Note that all but the fixed vector types are opaque, even though they may
    // actually be declared as having fields.
    bool isOpaqueSIMDType(CORINFO_CLASS_HANDLE structHandle) const
    {
        return ((m_simdHandleCache != nullptr) && (structHandle != m_simdHandleCache->SIMDVector2Handle) &&
                (structHandle != m_simdHandleCache->SIMDVector3Handle) &&
                (structHandle != m_simdHandleCache->SIMDVector4Handle));
    }

    // Returns true if the tree corresponds to a TYP_SIMD lcl var.
    // Note that both SIMD vector args and locals are marked as lvSIMDType = true, but the
    // type of an arg node is TYP_BYREF and a local node is TYP_SIMD or TYP_STRUCT.
    bool isSIMDTypeLocal(GenTree* tree)
    {
        return tree->OperIsLocal() && lvaGetDesc(tree->AsLclVarCommon())->lvSIMDType;
    }

    // Returns true if the lclVar is an opaque SIMD type.
    bool isOpaqueSIMDLclVar(const LclVarDsc* varDsc) const
    {
        if (!varDsc->lvSIMDType)
        {
            return false;
        }

        return isOpaqueSIMDType(varDsc->GetStructHnd());
    }

    static bool isRelOpSIMDIntrinsic(SIMDIntrinsicID intrinsicId)
    {
        return (intrinsicId == SIMDIntrinsicEqual);
    }

    // Returns base JIT type of a TYP_SIMD local.
    // Returns CORINFO_TYPE_UNDEF if the local is not TYP_SIMD.
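    // (For example, a local of type System.Numerics.Vector4 -- a TYP_SIMD16 with a float base type --
    // would report CORINFO_TYPE_FLOAT here.)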
CorInfoType getBaseJitTypeOfSIMDLocal(GenTree* tree) { if (isSIMDTypeLocal(tree)) { return lvaGetDesc(tree->AsLclVarCommon())->GetSimdBaseJitType(); } return CORINFO_TYPE_UNDEF; } bool isSIMDClass(CORINFO_CLASS_HANDLE clsHnd) { if (isIntrinsicType(clsHnd)) { const char* namespaceName = nullptr; (void)getClassNameFromMetadata(clsHnd, &namespaceName); return strcmp(namespaceName, "System.Numerics") == 0; } return false; } bool isSIMDClass(typeInfo* pTypeInfo) { return pTypeInfo->IsStruct() && isSIMDClass(pTypeInfo->GetClassHandleForValueClass()); } bool isHWSIMDClass(CORINFO_CLASS_HANDLE clsHnd) { #ifdef FEATURE_HW_INTRINSICS if (isIntrinsicType(clsHnd)) { const char* namespaceName = nullptr; (void)getClassNameFromMetadata(clsHnd, &namespaceName); return strcmp(namespaceName, "System.Runtime.Intrinsics") == 0; } #endif // FEATURE_HW_INTRINSICS return false; } bool isHWSIMDClass(typeInfo* pTypeInfo) { #ifdef FEATURE_HW_INTRINSICS return pTypeInfo->IsStruct() && isHWSIMDClass(pTypeInfo->GetClassHandleForValueClass()); #else return false; #endif } bool isSIMDorHWSIMDClass(CORINFO_CLASS_HANDLE clsHnd) { return isSIMDClass(clsHnd) || isHWSIMDClass(clsHnd); } bool isSIMDorHWSIMDClass(typeInfo* pTypeInfo) { return isSIMDClass(pTypeInfo) || isHWSIMDClass(pTypeInfo); } // Get the base (element) type and size in bytes for a SIMD type. Returns CORINFO_TYPE_UNDEF // if it is not a SIMD type or is an unsupported base JIT type. CorInfoType getBaseJitTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, unsigned* sizeBytes = nullptr); CorInfoType getBaseJitTypeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd) { return getBaseJitTypeAndSizeOfSIMDType(typeHnd, nullptr); } // Get SIMD Intrinsic info given the method handle. // Also sets typeHnd, argCount, baseType and sizeBytes out params. const SIMDIntrinsicInfo* getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* typeHnd, CORINFO_METHOD_HANDLE methodHnd, CORINFO_SIG_INFO* sig, bool isNewObj, unsigned* argCount, CorInfoType* simdBaseJitType, unsigned* sizeBytes); // Pops and returns GenTree node from importers type stack. // Normalizes TYP_STRUCT value in case of GT_CALL, GT_RET_EXPR and arg nodes. GenTree* impSIMDPopStack(var_types type, bool expectAddr = false, CORINFO_CLASS_HANDLE structType = nullptr); // Transforms operands and returns the SIMD intrinsic to be applied on // transformed operands to obtain given relop result. SIMDIntrinsicID impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId, CORINFO_CLASS_HANDLE typeHnd, unsigned simdVectorSize, CorInfoType* inOutBaseJitType, GenTree** op1, GenTree** op2); #if defined(TARGET_XARCH) // Transforms operands and returns the SIMD intrinsic to be applied on // transformed operands to obtain == comparison result. SIMDIntrinsicID impSIMDLongRelOpEqual(CORINFO_CLASS_HANDLE typeHnd, unsigned simdVectorSize, GenTree** op1, GenTree** op2); #endif // defined(TARGET_XARCH) void setLclRelatedToSIMDIntrinsic(GenTree* tree); bool areFieldsContiguous(GenTree* op1, GenTree* op2); bool areLocalFieldsContiguous(GenTreeLclFld* first, GenTreeLclFld* second); bool areArrayElementsContiguous(GenTree* op1, GenTree* op2); bool areArgumentsContiguous(GenTree* op1, GenTree* op2); GenTree* createAddressNodeForSIMDInit(GenTree* tree, unsigned simdSize); // check methodHnd to see if it is a SIMD method that is expanded as an intrinsic in the JIT. 
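    // (For instance, one of the System.Numerics.Vector methods recognized by getSIMDIntrinsicInfo above;
    // when the method is not recognized, the importer simply leaves it as an ordinary call.)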
GenTree* impSIMDIntrinsic(OPCODE opcode, GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, unsigned methodFlags, int memberRef); GenTree* getOp1ForConstructor(OPCODE opcode, GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd); // Whether SIMD vector occupies part of SIMD register. // SSE2: vector2f/3f are considered sub register SIMD types. // AVX: vector2f, 3f and 4f are all considered sub register SIMD types. bool isSubRegisterSIMDType(GenTreeSIMD* simdNode) { unsigned vectorRegisterByteLength; #if defined(TARGET_XARCH) // Calling the getSIMDVectorRegisterByteLength api causes the size of Vector<T> to be recorded // with the AOT compiler, so that it cannot change from aot compilation time to runtime // This api does not require such fixing as it merely pertains to the size of the simd type // relative to the Vector<T> size as used at compile time. (So detecting a vector length of 16 here // does not preclude the code from being used on a machine with a larger vector length.) if (getSIMDSupportLevel() < SIMD_AVX2_Supported) { vectorRegisterByteLength = 16; } else { vectorRegisterByteLength = 32; } #else vectorRegisterByteLength = getSIMDVectorRegisterByteLength(); #endif return (simdNode->GetSimdSize() < vectorRegisterByteLength); } // Get the type for the hardware SIMD vector. // This is the maximum SIMD type supported for this target. var_types getSIMDVectorType() { #if defined(TARGET_XARCH) if (getSIMDSupportLevel() == SIMD_AVX2_Supported) { return TYP_SIMD32; } else { // Verify and record that AVX2 isn't supported compVerifyInstructionSetUnusable(InstructionSet_AVX2); assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported); return TYP_SIMD16; } #elif defined(TARGET_ARM64) return TYP_SIMD16; #else assert(!"getSIMDVectorType() unimplemented on target arch"); unreached(); #endif } // Get the size of the SIMD type in bytes int getSIMDTypeSizeInBytes(CORINFO_CLASS_HANDLE typeHnd) { unsigned sizeBytes = 0; (void)getBaseJitTypeAndSizeOfSIMDType(typeHnd, &sizeBytes); return sizeBytes; } // Get the the number of elements of baseType of SIMD vector given by its size and baseType static int getSIMDVectorLength(unsigned simdSize, var_types baseType); // Get the the number of elements of baseType of SIMD vector given by its type handle int getSIMDVectorLength(CORINFO_CLASS_HANDLE typeHnd); // Get preferred alignment of SIMD type. int getSIMDTypeAlignment(var_types simdType); // Get the number of bytes in a System.Numeric.Vector<T> for the current compilation. // Note - cannot be used for System.Runtime.Intrinsic unsigned getSIMDVectorRegisterByteLength() { #if defined(TARGET_XARCH) if (getSIMDSupportLevel() == SIMD_AVX2_Supported) { return YMM_REGSIZE_BYTES; } else { // Verify and record that AVX2 isn't supported compVerifyInstructionSetUnusable(InstructionSet_AVX2); assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported); return XMM_REGSIZE_BYTES; } #elif defined(TARGET_ARM64) return FP_REGSIZE_BYTES; #else assert(!"getSIMDVectorRegisterByteLength() unimplemented on target arch"); unreached(); #endif } // The minimum and maximum possible number of bytes in a SIMD vector. 
// maxSIMDStructBytes // The minimum SIMD size supported by System.Numeric.Vectors or System.Runtime.Intrinsic // SSE: 16-byte Vector<T> and Vector128<T> // AVX: 32-byte Vector256<T> (Vector<T> is 16-byte) // AVX2: 32-byte Vector<T> and Vector256<T> unsigned int maxSIMDStructBytes() { #if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH) if (compOpportunisticallyDependsOn(InstructionSet_AVX)) { return YMM_REGSIZE_BYTES; } else { // Verify and record that AVX2 isn't supported compVerifyInstructionSetUnusable(InstructionSet_AVX2); assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported); return XMM_REGSIZE_BYTES; } #else return getSIMDVectorRegisterByteLength(); #endif } unsigned int minSIMDStructBytes() { return emitTypeSize(TYP_SIMD8); } public: // Returns the codegen type for a given SIMD size. static var_types getSIMDTypeForSize(unsigned size) { var_types simdType = TYP_UNDEF; if (size == 8) { simdType = TYP_SIMD8; } else if (size == 12) { simdType = TYP_SIMD12; } else if (size == 16) { simdType = TYP_SIMD16; } else if (size == 32) { simdType = TYP_SIMD32; } else { noway_assert(!"Unexpected size for SIMD type"); } return simdType; } private: unsigned getSIMDInitTempVarNum(var_types simdType); #else // !FEATURE_SIMD bool isOpaqueSIMDLclVar(LclVarDsc* varDsc) { return false; } #endif // FEATURE_SIMD public: //------------------------------------------------------------------------ // largestEnregisterableStruct: The size in bytes of the largest struct that can be enregistered. // // Notes: It is not guaranteed that the struct of this size or smaller WILL be a // candidate for enregistration. unsigned largestEnregisterableStructSize() { #ifdef FEATURE_SIMD #if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH) if (opts.IsReadyToRun()) { // Return constant instead of maxSIMDStructBytes, as maxSIMDStructBytes performs // checks that are effected by the current level of instruction set support would // otherwise cause the highest level of instruction set support to be reported to crossgen2. // and this api is only ever used as an optimization or assert, so no reporting should // ever happen. return YMM_REGSIZE_BYTES; } #endif // defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH) unsigned vectorRegSize = maxSIMDStructBytes(); assert(vectorRegSize >= TARGET_POINTER_SIZE); return vectorRegSize; #else // !FEATURE_SIMD return TARGET_POINTER_SIZE; #endif // !FEATURE_SIMD } // Use to determine if a struct *might* be a SIMD type. As this function only takes a size, many // structs will fit the criteria. bool structSizeMightRepresentSIMDType(size_t structSize) { #ifdef FEATURE_SIMD // Do not use maxSIMDStructBytes as that api in R2R on X86 and X64 may notify the JIT // about the size of a struct under the assumption that the struct size needs to be recorded. // By using largestEnregisterableStructSize here, the detail of whether or not Vector256<T> is // enregistered or not will not be messaged to the R2R compiler. return (structSize >= minSIMDStructBytes()) && (structSize <= largestEnregisterableStructSize()); #else return false; #endif // FEATURE_SIMD } #ifdef FEATURE_SIMD static bool vnEncodesResultTypeForSIMDIntrinsic(SIMDIntrinsicID intrinsicId); #endif // !FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS static bool vnEncodesResultTypeForHWIntrinsic(NamedIntrinsic hwIntrinsicID); #endif // FEATURE_HW_INTRINSICS private: // These routines need not be enclosed under FEATURE_SIMD since lvIsSIMDType() // is defined for both FEATURE_SIMD and !FEATURE_SIMD apropriately. 
The use // of this routines also avoids the need of #ifdef FEATURE_SIMD specific code. // Is this var is of type simd struct? bool lclVarIsSIMDType(unsigned varNum) { return lvaGetDesc(varNum)->lvIsSIMDType(); } // Is this Local node a SIMD local? bool lclVarIsSIMDType(GenTreeLclVarCommon* lclVarTree) { return lclVarIsSIMDType(lclVarTree->GetLclNum()); } // Returns true if the TYP_SIMD locals on stack are aligned at their // preferred byte boundary specified by getSIMDTypeAlignment(). // // As per the Intel manual, the preferred alignment for AVX vectors is // 32-bytes. It is not clear whether additional stack space used in // aligning stack is worth the benefit and for now will use 16-byte // alignment for AVX 256-bit vectors with unaligned load/stores to/from // memory. On x86, the stack frame is aligned to 4 bytes. We need to extend // existing support for double (8-byte) alignment to 16 or 32 byte // alignment for frames with local SIMD vars, if that is determined to be // profitable. // // On Amd64 and SysV, RSP+8 is aligned on entry to the function (before // prolog has run). This means that in RBP-based frames RBP will be 16-byte // aligned. For RSP-based frames these are only sometimes aligned, depending // on the frame size. // bool isSIMDTypeLocalAligned(unsigned varNum) { #if defined(FEATURE_SIMD) && ALIGN_SIMD_TYPES if (lclVarIsSIMDType(varNum) && lvaTable[varNum].lvType != TYP_BYREF) { // TODO-Cleanup: Can't this use the lvExactSize on the varDsc? int alignment = getSIMDTypeAlignment(lvaTable[varNum].lvType); if (alignment <= STACK_ALIGN) { bool rbpBased; int off = lvaFrameAddress(varNum, &rbpBased); // On SysV and Winx64 ABIs RSP+8 will be 16-byte aligned at the // first instruction of a function. If our frame is RBP based // then RBP will always be 16 bytes aligned, so we can simply // check the offset. if (rbpBased) { return (off % alignment) == 0; } // For RSP-based frame the alignment of RSP depends on our // locals. rsp+8 is aligned on entry and we just subtract frame // size so it is not hard to compute. Note that the compiler // tries hard to make sure the frame size means RSP will be // 16-byte aligned, but for leaf functions without locals (i.e. // frameSize = 0) it will not be. int frameSize = codeGen->genTotalFrameSize(); return ((8 - frameSize + off) % alignment) == 0; } } #endif // FEATURE_SIMD return false; } #ifdef DEBUG // Answer the question: Is a particular ISA supported? // Use this api when asking the question so that future // ISA questions can be asked correctly or when asserting // support/nonsupport for an instruction set bool compIsaSupportedDebugOnly(CORINFO_InstructionSet isa) const { #if defined(TARGET_XARCH) || defined(TARGET_ARM64) return (opts.compSupportsISA & (1ULL << isa)) != 0; #else return false; #endif } #endif // DEBUG bool notifyInstructionSetUsage(CORINFO_InstructionSet isa, bool supported) const; // Answer the question: Is a particular ISA allowed to be used implicitly by optimizations? 
    // The result of this api call will exactly match the target machine
    // on which the function is executed (except for CoreLib, where there are special rules)
    bool compExactlyDependsOn(CORINFO_InstructionSet isa) const
    {
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
        uint64_t isaBit = (1ULL << isa);
        if ((opts.compSupportsISAReported & isaBit) == 0)
        {
            if (notifyInstructionSetUsage(isa, (opts.compSupportsISA & isaBit) != 0))
                ((Compiler*)this)->opts.compSupportsISAExactly |= isaBit;
            ((Compiler*)this)->opts.compSupportsISAReported |= isaBit;
        }
        return (opts.compSupportsISAExactly & isaBit) != 0;
#else
        return false;
#endif
    }

    // Ensure that code will not execute if an instruction set is usable. Call this only
    // if the instruction set has previously been reported as unusable, but that
    // status has not yet been recorded to the AOT compiler.
    void compVerifyInstructionSetUnusable(CORINFO_InstructionSet isa)
    {
        // Use compExactlyDependsOn to capture and record the use of the isa.
        bool isaUsable = compExactlyDependsOn(isa);
        // Assert that the isa is unusable. If it is usable, this function should never have been called.
        assert(!isaUsable);
    }

    // Answer the question: Is a particular ISA allowed to be used implicitly by optimizations?
    // The result of this api call will match the target machine if the result is true.
    // If the result is false, then the target machine may still have support for the instruction.
    bool compOpportunisticallyDependsOn(CORINFO_InstructionSet isa) const
    {
        if ((opts.compSupportsISA & (1ULL << isa)) != 0)
        {
            return compExactlyDependsOn(isa);
        }
        else
        {
            return false;
        }
    }

    // Answer the question: Is a particular ISA supported for explicit hardware intrinsics?
    bool compHWIntrinsicDependsOn(CORINFO_InstructionSet isa) const
    {
        // Report intent to use the ISA to the EE
        compExactlyDependsOn(isa);
        return ((opts.compSupportsISA & (1ULL << isa)) != 0);
    }

    bool canUseVexEncoding() const
    {
#ifdef TARGET_XARCH
        return compOpportunisticallyDependsOn(InstructionSet_AVX);
#else
        return false;
#endif
    }

    /*
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XX                                                                           XX
    XX                           Compiler                                        XX
    XX                                                                           XX
    XX   Generic info about the compilation and the method being compiled.      XX
    XX   It is responsible for driving the other phases.                        XX
    XX   It is also responsible for all the memory management.                  XX
    XX                                                                           XX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    */

public:
    Compiler* InlineeCompiler; // The Compiler instance for the inlinee

    InlineResult* compInlineResult; // The result of importing the inlinee method.

    bool compDoAggressiveInlining; // If true, mark every method as CORINFO_FLG_FORCEINLINE
    bool compJmpOpUsed;            // Does the method do a JMP
    bool compLongUsed;             // Does the method use TYP_LONG
    bool compFloatingPointUsed;    // Does the method use TYP_FLOAT or TYP_DOUBLE
    bool compTailCallUsed;         // Does the method do a tailcall
    bool compTailPrefixSeen;       // Does the method IL have tail. prefix
    bool compLocallocSeen;         // Does the method IL have localloc opcode
    bool compLocallocUsed;         // Does the method use localloc.
    bool compLocallocOptimized;    // Does the method have an optimized localloc
    bool compQmarkUsed;            // Does the method use GT_QMARK/GT_COLON
    bool compQmarkRationalized;    // Is it allowed to use a GT_QMARK/GT_COLON node.
    bool compHasBackwardJump;      // Does the method (or some inlinee) have a lexically backwards jump?
bool compHasBackwardJumpInHandler; // Does the method have a lexically backwards jump in a handler? bool compSwitchedToOptimized; // Codegen initially was Tier0 but jit switched to FullOpts bool compSwitchedToMinOpts; // Codegen initially was Tier1/FullOpts but jit switched to MinOpts bool compSuppressedZeroInit; // There are vars with lvSuppressedZeroInit set // NOTE: These values are only reliable after // the importing is completely finished. #ifdef DEBUG // State information - which phases have completed? // These are kept together for easy discoverability bool bRangeAllowStress; bool compCodeGenDone; int64_t compNumStatementLinksTraversed; // # of links traversed while doing debug checks bool fgNormalizeEHDone; // Has the flowgraph EH normalization phase been done? size_t compSizeEstimate; // The estimated size of the method as per `gtSetEvalOrder`. size_t compCycleEstimate; // The estimated cycle count of the method as per `gtSetEvalOrder` #endif // DEBUG bool fgLocalVarLivenessDone; // Note that this one is used outside of debug. bool fgLocalVarLivenessChanged; bool compLSRADone; bool compRationalIRForm; bool compUsesThrowHelper; // There is a call to a THROW_HELPER for the compiled method. bool compGeneratingProlog; bool compGeneratingEpilog; bool compNeedsGSSecurityCookie; // There is an unsafe buffer (or localloc) on the stack. // Insert cookie on frame and code to check the cookie, like VC++ -GS. bool compGSReorderStackLayout; // There is an unsafe buffer on the stack, reorder locals and make local // copies of susceptible parameters to avoid buffer overrun attacks through locals/params bool getNeedsGSSecurityCookie() const { return compNeedsGSSecurityCookie; } void setNeedsGSSecurityCookie() { compNeedsGSSecurityCookie = true; } FrameLayoutState lvaDoneFrameLayout; // The highest frame layout state that we've completed. During // frame layout calculations, this is the level we are currently // computing. //---------------------------- JITing options ----------------------------- enum codeOptimize { BLENDED_CODE, SMALL_CODE, FAST_CODE, COUNT_OPT_CODE }; struct Options { JitFlags* jitFlags; // all flags passed from the EE // The instruction sets that the compiler is allowed to emit. uint64_t compSupportsISA; // The instruction sets that were reported to the VM as being used by the current method. Subset of // compSupportsISA. uint64_t compSupportsISAReported; // The instruction sets that the compiler is allowed to take advantage of implicitly during optimizations. // Subset of compSupportsISA. // The instruction sets available in compSupportsISA and not available in compSupportsISAExactly can be only // used via explicit hardware intrinsics. uint64_t compSupportsISAExactly; void setSupportedISAs(CORINFO_InstructionSetFlags isas) { compSupportsISA = isas.GetFlagsRaw(); } unsigned compFlags; // method attributes unsigned instrCount; unsigned lvRefCount; codeOptimize compCodeOpt; // what type of code optimizations bool compUseCMOV; // optimize maximally and/or favor speed over size? 
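        // Note (assumption): the DEFAULT_MIN_OPTS_* thresholds below feed compSetOptimizationLevel(),
        // which switches the method to MinOpts when the IL size, instruction count, basic block count,
        // or local variable counts exceed them; they can typically be overridden via COMPlus_JITMinOpts*
        // configuration settings.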
#define DEFAULT_MIN_OPTS_CODE_SIZE 60000 #define DEFAULT_MIN_OPTS_INSTR_COUNT 20000 #define DEFAULT_MIN_OPTS_BB_COUNT 2000 #define DEFAULT_MIN_OPTS_LV_NUM_COUNT 2000 #define DEFAULT_MIN_OPTS_LV_REF_COUNT 8000 // Maximun number of locals before turning off the inlining #define MAX_LV_NUM_COUNT_FOR_INLINING 512 bool compMinOpts; bool compMinOptsIsSet; #ifdef DEBUG mutable bool compMinOptsIsUsed; bool MinOpts() const { assert(compMinOptsIsSet); compMinOptsIsUsed = true; return compMinOpts; } bool IsMinOptsSet() const { return compMinOptsIsSet; } #else // !DEBUG bool MinOpts() const { return compMinOpts; } bool IsMinOptsSet() const { return compMinOptsIsSet; } #endif // !DEBUG bool OptimizationDisabled() const { return MinOpts() || compDbgCode; } bool OptimizationEnabled() const { return !OptimizationDisabled(); } void SetMinOpts(bool val) { assert(!compMinOptsIsUsed); assert(!compMinOptsIsSet || (compMinOpts == val)); compMinOpts = val; compMinOptsIsSet = true; } // true if the CLFLG_* for an optimization is set. bool OptEnabled(unsigned optFlag) const { return !!(compFlags & optFlag); } #ifdef FEATURE_READYTORUN bool IsReadyToRun() const { return jitFlags->IsSet(JitFlags::JIT_FLAG_READYTORUN); } #else bool IsReadyToRun() const { return false; } #endif // Check if the compilation is control-flow guard enabled. bool IsCFGEnabled() const { #if defined(TARGET_ARM64) || defined(TARGET_AMD64) // On these platforms we assume the register that the target is // passed in is preserved by the validator and take care to get the // target from the register for the call (even in debug mode). static_assert_no_msg((RBM_VALIDATE_INDIRECT_CALL_TRASH & (1 << REG_VALIDATE_INDIRECT_CALL_ADDR)) == 0); if (JitConfig.JitForceControlFlowGuard()) return true; return jitFlags->IsSet(JitFlags::JIT_FLAG_ENABLE_CFG); #else // The remaining platforms are not supported and would require some // work to support. // // ARM32: // The ARM32 validator does not preserve any volatile registers // which means we have to take special care to allocate and use a // callee-saved register (reloading the target from memory is a // security issue). // // x86: // On x86 some VSD calls disassemble the call site and expect an // indirect call which is fundamentally incompatible with CFG. // This would require a different way to pass this information // through. // return false; #endif } #ifdef FEATURE_ON_STACK_REPLACEMENT bool IsOSR() const { return jitFlags->IsSet(JitFlags::JIT_FLAG_OSR); } #else bool IsOSR() const { return false; } #endif // true if we should use the PINVOKE_{BEGIN,END} helpers instead of generating // PInvoke transitions inline. Normally used by R2R, but also used when generating a reverse pinvoke frame, as // the current logic for frame setup initializes and pushes // the InlinedCallFrame before performing the Reverse PInvoke transition, which is invalid (as frames cannot // safely be pushed/popped while the thread is in a preemptive state.). bool ShouldUsePInvokeHelpers() { return jitFlags->IsSet(JitFlags::JIT_FLAG_USE_PINVOKE_HELPERS) || jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE); } // true if we should use insert the REVERSE_PINVOKE_{ENTER,EXIT} helpers in the method // prolog/epilog bool IsReversePInvoke() { return jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE); } bool compScopeInfo; // Generate the LocalVar info ? bool compDbgCode; // Generate debugger-friendly code? bool compDbgInfo; // Gather debugging info? 
bool compDbgEnC; #ifdef PROFILING_SUPPORTED bool compNoPInvokeInlineCB; #else static const bool compNoPInvokeInlineCB; #endif #ifdef DEBUG bool compGcChecks; // Check arguments and return values to ensure they are sane #endif #if defined(DEBUG) && defined(TARGET_XARCH) bool compStackCheckOnRet; // Check stack pointer on return to ensure it is correct. #endif // defined(DEBUG) && defined(TARGET_XARCH) #if defined(DEBUG) && defined(TARGET_X86) bool compStackCheckOnCall; // Check stack pointer after call to ensure it is correct. Only for x86. #endif // defined(DEBUG) && defined(TARGET_X86) bool compReloc; // Generate relocs for pointers in code, true for all ngen/prejit codegen #ifdef DEBUG #if defined(TARGET_XARCH) bool compEnablePCRelAddr; // Whether absolute addr be encoded as PC-rel offset by RyuJIT where possible #endif #endif // DEBUG #ifdef UNIX_AMD64_ABI // This flag is indicating if there is a need to align the frame. // On AMD64-Windows, if there are calls, 4 slots for the outgoing ars are allocated, except for // FastTailCall. This slots makes the frame size non-zero, so alignment logic will be called. // On AMD64-Unix, there are no such slots. There is a possibility to have calls in the method with frame size of // 0. The frame alignment logic won't kick in. This flags takes care of the AMD64-Unix case by remembering that // there are calls and making sure the frame alignment logic is executed. bool compNeedToAlignFrame; #endif // UNIX_AMD64_ABI bool compProcedureSplitting; // Separate cold code from hot code bool genFPorder; // Preserve FP order (operations are non-commutative) bool genFPopt; // Can we do frame-pointer-omission optimization? bool altJit; // True if we are an altjit and are compiling this method #ifdef OPT_CONFIG bool optRepeat; // Repeat optimizer phases k times #endif #ifdef DEBUG bool compProcedureSplittingEH; // Separate cold code from hot code for functions with EH bool dspCode; // Display native code generated bool dspEHTable; // Display the EH table reported to the VM bool dspDebugInfo; // Display the Debug info reported to the VM bool dspInstrs; // Display the IL instructions intermixed with the native code output bool dspLines; // Display source-code lines intermixed with native code output bool dmpHex; // Display raw bytes in hex of native code output bool varNames; // Display variables names in native code output bool disAsm; // Display native code as it is generated bool disAsmSpilled; // Display native code when any register spilling occurs bool disasmWithGC; // Display GC info interleaved with disassembly. bool disDiffable; // Makes the Disassembly code 'diff-able' bool disAddr; // Display process address next to each instruction in disassembly code bool disAlignment; // Display alignment boundaries in disassembly code bool disAsm2; // Display native code after it is generated using external disassembler bool dspOrder; // Display names of each of the methods that we ngen/jit bool dspUnwind; // Display the unwind info output bool dspDiffable; // Makes the Jit Dump 'diff-able' (currently uses same COMPlus_* flag as disDiffable) bool compLongAddress; // Force using large pseudo instructions for long address // (IF_LARGEJMP/IF_LARGEADR/IF_LARGLDC) bool dspGCtbls; // Display the GC tables #endif bool compExpandCallsEarly; // True if we should expand virtual call targets early for this method // Default numbers used to perform loop alignment. All the numbers are choosen // based on experimenting with various benchmarks. 
// Default minimum loop block weight required to enable loop alignment. #define DEFAULT_ALIGN_LOOP_MIN_BLOCK_WEIGHT 4 // By default a loop will be aligned at 32B address boundary to get better // performance as per architecture manuals. #define DEFAULT_ALIGN_LOOP_BOUNDARY 0x20 // For non-adaptive loop alignment, by default, only align a loop whose size is // at most 3 times the alignment block size. If the loop is bigger than that, it is most // likely complicated enough that loop alignment will not impact performance. #define DEFAULT_MAX_LOOPSIZE_FOR_ALIGN DEFAULT_ALIGN_LOOP_BOUNDARY * 3 #ifdef DEBUG // Loop alignment variables // If set, for non-adaptive alignment, ensure loop jmps are not on or cross alignment boundary. bool compJitAlignLoopForJcc; #endif // For non-adaptive alignment, minimum loop size (in bytes) for which alignment will be done. unsigned short compJitAlignLoopMaxCodeSize; // Minimum weight needed for the first block of a loop to make it a candidate for alignment. unsigned short compJitAlignLoopMinBlockWeight; // For non-adaptive alignment, address boundary (power of 2) at which loop alignment should // be done. By default, 32B. unsigned short compJitAlignLoopBoundary; // Padding limit to align a loop. unsigned short compJitAlignPaddingLimit; // If set, perform adaptive loop alignment that limits number of padding based on loop size. bool compJitAlignLoopAdaptive; // If set, tries to hide alignment instructions behind unconditional jumps. bool compJitHideAlignBehindJmp; // If set, tracks the hidden return buffer for struct arg. bool compJitOptimizeStructHiddenBuffer; #ifdef LATE_DISASM bool doLateDisasm; // Run the late disassembler #endif // LATE_DISASM #if DUMP_GC_TABLES && !defined(DEBUG) #pragma message("NOTE: this non-debug build has GC ptr table dumping always enabled!") static const bool dspGCtbls = true; #endif #ifdef PROFILING_SUPPORTED // Whether to emit Enter/Leave/TailCall hooks using a dummy stub (DummyProfilerELTStub()). // This option helps make the JIT behave as if it is running under a profiler. bool compJitELTHookEnabled; #endif // PROFILING_SUPPORTED #if FEATURE_TAILCALL_OPT // Whether opportunistic or implicit tail call optimization is enabled. bool compTailCallOpt; // Whether optimization of transforming a recursive tail call into a loop is enabled. bool compTailCallLoopOpt; #endif #if FEATURE_FASTTAILCALL // Whether fast tail calls are allowed. bool compFastTailCalls; #endif // FEATURE_FASTTAILCALL #if defined(TARGET_ARM64) // Decision about whether to save FP/LR registers with callee-saved registers (see // COMPlus_JitSaveFpLrWithCalleSavedRegisters). int compJitSaveFpLrWithCalleeSavedRegisters; #endif // defined(TARGET_ARM64) #ifdef CONFIGURABLE_ARM_ABI bool compUseSoftFP = false; #else #ifdef ARM_SOFTFP static const bool compUseSoftFP = true; #else // !ARM_SOFTFP static const bool compUseSoftFP = false; #endif // ARM_SOFTFP #endif // CONFIGURABLE_ARM_ABI } opts; static bool s_pAltJitExcludeAssembliesListInitialized; static AssemblyNamesList2* s_pAltJitExcludeAssembliesList; #ifdef DEBUG static bool s_pJitDisasmIncludeAssembliesListInitialized; static AssemblyNamesList2* s_pJitDisasmIncludeAssembliesList; static bool s_pJitFunctionFileInitialized; static MethodSet* s_pJitMethodSet; #endif // DEBUG #ifdef DEBUG // silence warning of cast to greater size. 
It is easier to silence than construct code the compiler is happy with, and // it is safe in this case #pragma warning(push) #pragma warning(disable : 4312) template <typename T> T dspPtr(T p) { return (p == ZERO) ? ZERO : (opts.dspDiffable ? T(0xD1FFAB1E) : p); } template <typename T> T dspOffset(T o) { return (o == ZERO) ? ZERO : (opts.dspDiffable ? T(0xD1FFAB1E) : o); } #pragma warning(pop) static int dspTreeID(GenTree* tree) { return tree->gtTreeID; } static void printStmtID(Statement* stmt) { assert(stmt != nullptr); printf(FMT_STMT, stmt->GetID()); } static void printTreeID(GenTree* tree) { if (tree == nullptr) { printf("[------]"); } else { printf("[%06d]", dspTreeID(tree)); } } const char* pgoSourceToString(ICorJitInfo::PgoSource p); const char* devirtualizationDetailToString(CORINFO_DEVIRTUALIZATION_DETAIL detail); #endif // DEBUG // clang-format off #define STRESS_MODES \ \ STRESS_MODE(NONE) \ \ /* "Variations" stress areas which we try to mix up with each other. */ \ /* These should not be exhaustively used as they might */ \ /* hide/trivialize other areas */ \ \ STRESS_MODE(REGS) \ STRESS_MODE(DBL_ALN) \ STRESS_MODE(LCL_FLDS) \ STRESS_MODE(UNROLL_LOOPS) \ STRESS_MODE(MAKE_CSE) \ STRESS_MODE(LEGACY_INLINE) \ STRESS_MODE(CLONE_EXPR) \ STRESS_MODE(USE_CMOV) \ STRESS_MODE(FOLD) \ STRESS_MODE(MERGED_RETURNS) \ STRESS_MODE(BB_PROFILE) \ STRESS_MODE(OPT_BOOLS_GC) \ STRESS_MODE(REMORPH_TREES) \ STRESS_MODE(64RSLT_MUL) \ STRESS_MODE(DO_WHILE_LOOPS) \ STRESS_MODE(MIN_OPTS) \ STRESS_MODE(REVERSE_FLAG) /* Will set GTF_REVERSE_OPS whenever we can */ \ STRESS_MODE(REVERSE_COMMA) /* Will reverse commas created with gtNewCommaNode */ \ STRESS_MODE(TAILCALL) /* Will make the call as a tailcall whenever legal */ \ STRESS_MODE(CATCH_ARG) /* Will spill catch arg */ \ STRESS_MODE(UNSAFE_BUFFER_CHECKS) \ STRESS_MODE(NULL_OBJECT_CHECK) \ STRESS_MODE(PINVOKE_RESTORE_ESP) \ STRESS_MODE(RANDOM_INLINE) \ STRESS_MODE(SWITCH_CMP_BR_EXPANSION) \ STRESS_MODE(GENERIC_VARN) \ STRESS_MODE(PROFILER_CALLBACKS) /* Will generate profiler hooks for ELT callbacks */ \ STRESS_MODE(BYREF_PROMOTION) /* Change undoPromotion decisions for byrefs */ \ STRESS_MODE(PROMOTE_FEWER_STRUCTS)/* Don't promote some structs that can be promoted */ \ STRESS_MODE(VN_BUDGET)/* Randomize the VN budget */ \ \ /* After COUNT_VARN, stress level 2 does all of these all the time */ \ \ STRESS_MODE(COUNT_VARN) \ \ /* "Check" stress areas that can be exhaustively used if we */ \ /* dont care about performance at all */ \ \ STRESS_MODE(FORCE_INLINE) /* Treat every method as AggressiveInlining */ \ STRESS_MODE(CHK_FLOW_UPDATE) \ STRESS_MODE(EMITTER) \ STRESS_MODE(CHK_REIMPORT) \ STRESS_MODE(FLATFP) \ STRESS_MODE(GENERIC_CHECK) \ STRESS_MODE(COUNT) enum compStressArea { #define STRESS_MODE(mode) STRESS_##mode, STRESS_MODES #undef STRESS_MODE }; // clang-format on #ifdef DEBUG static const LPCWSTR s_compStressModeNames[STRESS_COUNT + 1]; BYTE compActiveStressModes[STRESS_COUNT]; #endif // DEBUG #define MAX_STRESS_WEIGHT 100 bool compStressCompile(compStressArea stressArea, unsigned weightPercentage); bool compStressCompileHelper(compStressArea stressArea, unsigned weightPercentage); #ifdef DEBUG bool compInlineStress() { return compStressCompile(STRESS_LEGACY_INLINE, 50); } bool compRandomInlineStress() { return compStressCompile(STRESS_RANDOM_INLINE, 50); } bool compPromoteFewerStructs(unsigned lclNum); #endif // DEBUG bool compTailCallStress() { #ifdef DEBUG // Do not stress tailcalls in IL stubs as the runtime creates several IL // stubs to 
implement the tailcall mechanism, which would then // recursively create more IL stubs. return !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && (JitConfig.TailcallStress() != 0 || compStressCompile(STRESS_TAILCALL, 5)); #else return false; #endif } const char* compGetTieringName(bool wantShortName = false) const; const char* compGetStressMessage() const; codeOptimize compCodeOpt() const { #if 0 // Switching between size & speed has measurable throughput impact // (3.5% on NGen CoreLib when measured). It used to be enabled for // DEBUG, but should generate identical code between CHK & RET builds, // so that's not acceptable. // TODO-Throughput: Figure out what to do about size vs. speed & throughput. // Investigate the cause of the throughput regression. return opts.compCodeOpt; #else return BLENDED_CODE; #endif } //--------------------- Info about the procedure -------------------------- struct Info { COMP_HANDLE compCompHnd; CORINFO_MODULE_HANDLE compScopeHnd; CORINFO_CLASS_HANDLE compClassHnd; CORINFO_METHOD_HANDLE compMethodHnd; CORINFO_METHOD_INFO* compMethodInfo; bool hasCircularClassConstraints; bool hasCircularMethodConstraints; #if defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS const char* compMethodName; const char* compClassName; const char* compFullName; double compPerfScore; int compMethodSuperPMIIndex; // useful when debugging under SuperPMI #endif // defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS #if defined(DEBUG) || defined(INLINE_DATA) // Method hash is logically const, but computed // on first demand. mutable unsigned compMethodHashPrivate; unsigned compMethodHash() const; #endif // defined(DEBUG) || defined(INLINE_DATA) #ifdef PSEUDORANDOM_NOP_INSERTION // things for pseudorandom nop insertion unsigned compChecksum; CLRRandom compRNG; #endif // The following holds the FLG_xxxx flags for the method we're compiling. unsigned compFlags; // The following holds the class attributes for the method we're compiling. unsigned compClassAttr; const BYTE* compCode; IL_OFFSET compILCodeSize; // The IL code size IL_OFFSET compILImportSize; // Estimated amount of IL actually imported IL_OFFSET compILEntry; // The IL entry point (normally 0) PatchpointInfo* compPatchpointInfo; // Patchpoint data for OSR (normally nullptr) UNATIVE_OFFSET compNativeCodeSize; // The native code size, after instructions are issued. This // is less than (compTotalHotCodeSize + compTotalColdCodeSize) only if: // (1) the code is not hot/cold split, and we issued less code than we expected, or // (2) the code is hot/cold split, and we issued less code than we expected // in the cold section (the hot section will always be padded out to compTotalHotCodeSize). bool compIsStatic : 1; // Is the method static (no 'this' pointer)? bool compIsVarArgs : 1; // Does the method have varargs parameters? bool compInitMem : 1; // Is the CORINFO_OPT_INIT_LOCALS bit set in the method info options? bool compProfilerCallback : 1; // JIT inserted a profiler Enter callback bool compPublishStubParam : 1; // EAX captured in prolog will be available through an intrinsic bool compHasNextCallRetAddr : 1; // The NextCallReturnAddress intrinsic is used. var_types compRetType; // Return type of the method as declared in IL var_types compRetNativeType; // Normalized return type as per target arch ABI unsigned compILargsCount; // Number of arguments (incl. implicit but not hidden) unsigned compArgsCount; // Number of arguments (incl. 
implicit and hidden) #if FEATURE_FASTTAILCALL unsigned compArgStackSize; // Incoming argument stack size in bytes #endif // FEATURE_FASTTAILCALL unsigned compRetBuffArg; // position of hidden return param var (0, 1) (BAD_VAR_NUM means not present); int compTypeCtxtArg; // position of hidden param for type context for generic code (CORINFO_CALLCONV_PARAMTYPE) unsigned compThisArg; // position of implicit this pointer param (not to be confused with lvaArg0Var) unsigned compILlocalsCount; // Number of vars : args + locals (incl. implicit but not hidden) unsigned compLocalsCount; // Number of vars : args + locals (incl. implicit and hidden) unsigned compMaxStack; UNATIVE_OFFSET compTotalHotCodeSize; // Total number of bytes of Hot Code in the method UNATIVE_OFFSET compTotalColdCodeSize; // Total number of bytes of Cold Code in the method unsigned compUnmanagedCallCountWithGCTransition; // count of unmanaged calls with GC transition. CorInfoCallConvExtension compCallConv; // The entry-point calling convention for this method. unsigned compLvFrameListRoot; // lclNum for the Frame root unsigned compXcptnsCount; // Number of exception-handling clauses read in the method's IL. // You should generally use compHndBBtabCount instead: it is the // current number of EH clauses (after additions like synchronized // methods and funclets, and removals like unreachable code deletion). Target::ArgOrder compArgOrder; bool compMatchedVM; // true if the VM is "matched": either the JIT is a cross-compiler // and the VM expects that, or the JIT is a "self-host" compiler // (e.g., x86 hosted targeting x86) and the VM expects that. /* The following holds IL scope information about local variables. */ unsigned compVarScopesCount; VarScopeDsc* compVarScopes; /* The following holds information about instr offsets for * which we need to report IP-mappings */ IL_OFFSET* compStmtOffsets; // sorted unsigned compStmtOffsetsCount; ICorDebugInfo::BoundaryTypes compStmtOffsetsImplicit; #define CPU_X86 0x0100 // The generic X86 CPU #define CPU_X86_PENTIUM_4 0x0110 #define CPU_X64 0x0200 // The generic x64 CPU #define CPU_AMD_X64 0x0210 // AMD x64 CPU #define CPU_INTEL_X64 0x0240 // Intel x64 CPU #define CPU_ARM 0x0300 // The generic ARM CPU #define CPU_ARM64 0x0400 // The generic ARM64 CPU unsigned genCPU; // What CPU are we running on // Number of class profile probes in this method unsigned compClassProbeCount; } info; // Returns true if the method being compiled returns a non-void and non-struct value. // Note that lvaInitTypeRef() normalizes compRetNativeType for struct returns in a // single register as per target arch ABI (e.g on Amd64 Windows structs of size 1, 2, // 4 or 8 gets normalized to TYP_BYTE/TYP_SHORT/TYP_INT/TYP_LONG; On Arm HFA structs). // Methods returning such structs are considered to return non-struct return value and // this method returns true in that case. bool compMethodReturnsNativeScalarType() { return (info.compRetType != TYP_VOID) && !varTypeIsStruct(info.compRetNativeType); } // Returns true if the method being compiled returns RetBuf addr as its return value bool compMethodReturnsRetBufAddr() { // There are cases where implicit RetBuf argument should be explicitly returned in a register. // In such cases the return type is changed to TYP_BYREF and appropriate IR is generated. // These cases are: CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_AMD64 // 1. on x64 Windows and Unix the address of RetBuf needs to be returned by // methods with hidden RetBufArg in RAX. 
In such case GT_RETURN is of TYP_BYREF, // returning the address of RetBuf. return (info.compRetBuffArg != BAD_VAR_NUM); #else // TARGET_AMD64 #ifdef PROFILING_SUPPORTED // 2. Profiler Leave callback expects the address of retbuf as return value for // methods with hidden RetBuf argument. impReturnInstruction() when profiler // callbacks are needed creates GT_RETURN(TYP_BYREF, op1 = Addr of RetBuf) for // methods with hidden RetBufArg. if (compIsProfilerHookNeeded()) { return (info.compRetBuffArg != BAD_VAR_NUM); } #endif // 3. Windows ARM64 native instance calling convention requires the address of RetBuff // to be returned in x0. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_ARM64) if (TargetOS::IsWindows) { auto callConv = info.compCallConv; if (callConvIsInstanceMethodCallConv(callConv)) { return (info.compRetBuffArg != BAD_VAR_NUM); } } #endif // TARGET_ARM64 // 4. x86 unmanaged calling conventions require the address of RetBuff to be returned in eax. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_X86) if (info.compCallConv != CorInfoCallConvExtension::Managed) { return (info.compRetBuffArg != BAD_VAR_NUM); } #endif return false; #endif // TARGET_AMD64 } // Returns true if the method returns a value in more than one return register // TODO-ARM-Bug: Deal with multi-register genReturnLocaled structs? // TODO-ARM64: Does this apply for ARM64 too? bool compMethodReturnsMultiRegRetType() { #if FEATURE_MULTIREG_RET #if defined(TARGET_X86) // On x86, 64-bit longs and structs are returned in multiple registers return varTypeIsLong(info.compRetNativeType) || (varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM)); #else // targets: X64-UNIX, ARM64 or ARM32 // On all other targets that support multireg return values: // Methods returning a struct in multiple registers have a return value of TYP_STRUCT. // Such method's compRetNativeType is TYP_STRUCT without a hidden RetBufArg return varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM); #endif // TARGET_XXX #else // not FEATURE_MULTIREG_RET // For this architecture there are no multireg returns return false; #endif // FEATURE_MULTIREG_RET } bool compEnregLocals() { return ((opts.compFlags & CLFLG_REGVAR) != 0); } bool compEnregStructLocals() { return (JitConfig.JitEnregStructLocals() != 0); } bool compObjectStackAllocation() { return (JitConfig.JitObjectStackAllocation() != 0); } // Returns true if the method returns a value in more than one return register, // it should replace/be merged with compMethodReturnsMultiRegRetType when #36868 is fixed. // The difference from original `compMethodReturnsMultiRegRetType` is in ARM64 SIMD* handling, // this method correctly returns false for it (it is passed as HVA), when the original returns true. bool compMethodReturnsMultiRegRegTypeAlternate() { #if FEATURE_MULTIREG_RET #if defined(TARGET_X86) // On x86, 64-bit longs and structs are returned in multiple registers return varTypeIsLong(info.compRetNativeType) || (varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM)); #else // targets: X64-UNIX, ARM64 or ARM32 #if defined(TARGET_ARM64) // TYP_SIMD* are returned in one register. if (varTypeIsSIMD(info.compRetNativeType)) { return false; } #endif // On all other targets that support multireg return values: // Methods returning a struct in multiple registers have a return value of TYP_STRUCT. 
// Such method's compRetNativeType is TYP_STRUCT without a hidden RetBufArg return varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM); #endif // TARGET_XXX #else // not FEATURE_MULTIREG_RET // For this architecture there are no multireg returns return false; #endif // FEATURE_MULTIREG_RET } // Returns true if the method being compiled returns a value bool compMethodHasRetVal() { return compMethodReturnsNativeScalarType() || compMethodReturnsRetBufAddr() || compMethodReturnsMultiRegRetType(); } // Returns true if the method requires a PInvoke prolog and epilog bool compMethodRequiresPInvokeFrame() { return (info.compUnmanagedCallCountWithGCTransition > 0); } // Returns true if address-exposed user variables should be poisoned with a recognizable value bool compShouldPoisonFrame() { #ifdef FEATURE_ON_STACK_REPLACEMENT if (opts.IsOSR()) return false; #endif return !info.compInitMem && opts.compDbgCode; } // Returns true if the jit supports having patchpoints in this method. // Optionally, get the reason why not. bool compCanHavePatchpoints(const char** reason = nullptr); #if defined(DEBUG) void compDispLocalVars(); #endif // DEBUG private: class ClassLayoutTable* m_classLayoutTable; class ClassLayoutTable* typCreateClassLayoutTable(); class ClassLayoutTable* typGetClassLayoutTable(); public: // Get the layout having the specified layout number. ClassLayout* typGetLayoutByNum(unsigned layoutNum); // Get the layout number of the specified layout. unsigned typGetLayoutNum(ClassLayout* layout); // Get the layout having the specified size but no class handle. ClassLayout* typGetBlkLayout(unsigned blockSize); // Get the number of a layout having the specified size but no class handle. unsigned typGetBlkLayoutNum(unsigned blockSize); // Get the layout for the specified class handle. ClassLayout* typGetObjLayout(CORINFO_CLASS_HANDLE classHandle); // Get the number of a layout for the specified class handle. unsigned typGetObjLayoutNum(CORINFO_CLASS_HANDLE classHandle); //-------------------------- Global Compiler Data ------------------------------------ #ifdef DEBUG private: static LONG s_compMethodsCount; // to produce unique label names #endif public: #ifdef DEBUG LONG compMethodID; unsigned compGenTreeID; unsigned compStatementID; unsigned compBasicBlockID; #endif BasicBlock* compCurBB; // the current basic block in process Statement* compCurStmt; // the current statement in process GenTree* compCurTree; // the current tree in process // The following is used to create the 'method JIT info' block. 
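    // (Illustrative note, assumption: compInfoBlkSize/compInfoBlkAddr below describe the GC info
    // block that is allocated through the EE and filled in during code generation.)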
size_t compInfoBlkSize; BYTE* compInfoBlkAddr; EHblkDsc* compHndBBtab; // array of EH data unsigned compHndBBtabCount; // element count of used elements in EH data array unsigned compHndBBtabAllocCount; // element count of allocated elements in EH data array #if defined(TARGET_X86) //------------------------------------------------------------------------- // Tracking of region covered by the monitor in synchronized methods void* syncStartEmitCookie; // the emitter cookie for first instruction after the call to MON_ENTER void* syncEndEmitCookie; // the emitter cookie for first instruction after the call to MON_EXIT #endif // !TARGET_X86 Phases mostRecentlyActivePhase; // the most recently active phase PhaseChecks activePhaseChecks; // the currently active phase checks //------------------------------------------------------------------------- // The following keeps track of how many bytes of local frame space we've // grabbed so far in the current function, and how many argument bytes we // need to pop when we return. // unsigned compLclFrameSize; // secObject+lclBlk+locals+temps // Count of callee-saved regs we pushed in the prolog. // Does not include EBP for isFramePointerUsed() and double-aligned frames. // In case of Amd64 this doesn't include float regs saved on stack. unsigned compCalleeRegsPushed; #if defined(TARGET_XARCH) // Mask of callee saved float regs on stack. regMaskTP compCalleeFPRegsSavedMask; #endif #ifdef TARGET_AMD64 // Quirk for VS debug-launch scenario to work: // Bytes of padding between save-reg area and locals. #define VSQUIRK_STACK_PAD (2 * REGSIZE_BYTES) unsigned compVSQuirkStackPaddingNeeded; #endif unsigned compArgSize; // total size of arguments in bytes (including register args (lvIsRegArg)) #ifdef TARGET_ARM bool compHasSplitParam; #endif unsigned compMapILargNum(unsigned ILargNum); // map accounting for hidden args unsigned compMapILvarNum(unsigned ILvarNum); // map accounting for hidden args unsigned compMap2ILvarNum(unsigned varNum) const; // map accounting for hidden args #if defined(TARGET_ARM64) struct FrameInfo { // Frame type (1-5) int frameType; // Distance from established (method body) SP to base of callee save area int calleeSaveSpOffset; // Amount to subtract from SP before saving (prolog) OR // to add to SP after restoring (epilog) callee saves int calleeSaveSpDelta; // Distance from established SP to where caller's FP was saved int offsetSpToSavedFp; } compFrameInfo; #endif //------------------------------------------------------------------------- static void compStartup(); // One-time initialization static void compShutdown(); // One-time finalization void compInit(ArenaAllocator* pAlloc, CORINFO_METHOD_HANDLE methodHnd, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, InlineInfo* inlineInfo); void compDone(); static void compDisplayStaticSizes(FILE* fout); //------------ Some utility functions -------------- void* compGetHelperFtn(CorInfoHelpFunc ftnNum, /* IN */ void** ppIndirection); /* OUT */ // Several JIT/EE interface functions return a CorInfoType, and also return a // class handle as an out parameter if the type is a value class. Returns the // size of the type these describe. unsigned compGetTypeSize(CorInfoType cit, CORINFO_CLASS_HANDLE clsHnd); #ifdef DEBUG // Components used by the compiler may write unit test suites, and // have them run within this method. They will be run only once per process, and only // in debug. (Perhaps should be under the control of a COMPlus_ flag.) // These should fail by asserting. 
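    // Illustrative example (assumption): a component such as the bit-set implementation can register
    // a self-test here; any failure surfaces as an assert rather than a returned error code.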
void compDoComponentUnitTestsOnce(); #endif // DEBUG int compCompile(CORINFO_MODULE_HANDLE classPtr, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags); void compCompileFinish(); int compCompileHelper(CORINFO_MODULE_HANDLE classPtr, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlag); ArenaAllocator* compGetArenaAllocator(); void generatePatchpointInfo(); #if MEASURE_MEM_ALLOC static bool s_dspMemStats; // Display per-phase memory statistics for every function #endif // MEASURE_MEM_ALLOC #if LOOP_HOIST_STATS unsigned m_loopsConsidered; bool m_curLoopHasHoistedExpression; unsigned m_loopsWithHoistedExpressions; unsigned m_totalHoistedExpressions; void AddLoopHoistStats(); void PrintPerMethodLoopHoistStats(); static CritSecObject s_loopHoistStatsLock; // This lock protects the data structures below. static unsigned s_loopsConsidered; static unsigned s_loopsWithHoistedExpressions; static unsigned s_totalHoistedExpressions; static void PrintAggregateLoopHoistStats(FILE* f); #endif // LOOP_HOIST_STATS #if TRACK_ENREG_STATS class EnregisterStats { private: unsigned m_totalNumberOfVars; unsigned m_totalNumberOfStructVars; unsigned m_totalNumberOfEnregVars; unsigned m_totalNumberOfStructEnregVars; unsigned m_addrExposed; unsigned m_hiddenStructArg; unsigned m_VMNeedsStackAddr; unsigned m_localField; unsigned m_blockOp; unsigned m_dontEnregStructs; unsigned m_notRegSizeStruct; unsigned m_structArg; unsigned m_lclAddrNode; unsigned m_castTakesAddr; unsigned m_storeBlkSrc; unsigned m_oneAsgRetyping; unsigned m_swizzleArg; unsigned m_blockOpRet; unsigned m_returnSpCheck; unsigned m_simdUserForcesDep; unsigned m_liveInOutHndlr; unsigned m_depField; unsigned m_noRegVars; unsigned m_minOptsGC; #ifdef JIT32_GCENCODER unsigned m_PinningRef; #endif // JIT32_GCENCODER #if !defined(TARGET_64BIT) unsigned m_longParamField; #endif // !TARGET_64BIT unsigned m_parentExposed; unsigned m_tooConservative; unsigned m_escapeAddress; unsigned m_osrExposed; unsigned m_stressLclFld; unsigned m_copyFldByFld; unsigned m_dispatchRetBuf; unsigned m_wideIndir; public: void RecordLocal(const LclVarDsc* varDsc); void Dump(FILE* fout) const; }; static EnregisterStats s_enregisterStats; #endif // TRACK_ENREG_STATS bool compIsForImportOnly(); bool compIsForInlining() const; bool compDonotInline(); #ifdef DEBUG // Get the default fill char value we randomize this value when JitStress is enabled. 
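    // (Illustrative note: in a plain debug build this yields a fixed fill pattern, while under
    // JitStress the fill byte is varied so that code depending on uninitialized values is more
    // likely to be caught.)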
static unsigned char compGetJitDefaultFill(Compiler* comp); const char* compLocalVarName(unsigned varNum, unsigned offs); VarName compVarName(regNumber reg, bool isFloatReg = false); const char* compRegVarName(regNumber reg, bool displayVar = false, bool isFloatReg = false); const char* compRegNameForSize(regNumber reg, size_t size); const char* compFPregVarName(unsigned fpReg, bool displayVar = false); void compDspSrcLinesByNativeIP(UNATIVE_OFFSET curIP); void compDspSrcLinesByLineNum(unsigned line, bool seek = false); #endif // DEBUG //------------------------------------------------------------------------- struct VarScopeListNode { VarScopeDsc* data; VarScopeListNode* next; static VarScopeListNode* Create(VarScopeDsc* value, CompAllocator alloc) { VarScopeListNode* node = new (alloc) VarScopeListNode; node->data = value; node->next = nullptr; return node; } }; struct VarScopeMapInfo { VarScopeListNode* head; VarScopeListNode* tail; static VarScopeMapInfo* Create(VarScopeListNode* node, CompAllocator alloc) { VarScopeMapInfo* info = new (alloc) VarScopeMapInfo; info->head = node; info->tail = node; return info; } }; // Max value of scope count for which we would use linear search; for larger values we would use hashtable lookup. static const unsigned MAX_LINEAR_FIND_LCL_SCOPELIST = 32; typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, VarScopeMapInfo*> VarNumToScopeDscMap; // Map to keep variables' scope indexed by varNum containing it's scope dscs at the index. VarNumToScopeDscMap* compVarScopeMap; VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned lifeBeg, unsigned lifeEnd); VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned offs); VarScopeDsc* compFindLocalVarLinear(unsigned varNum, unsigned offs); void compInitVarScopeMap(); VarScopeDsc** compEnterScopeList; // List has the offsets where variables // enter scope, sorted by instr offset unsigned compNextEnterScope; VarScopeDsc** compExitScopeList; // List has the offsets where variables // go out of scope, sorted by instr offset unsigned compNextExitScope; void compInitScopeLists(); void compResetScopeLists(); VarScopeDsc* compGetNextEnterScope(unsigned offs, bool scan = false); VarScopeDsc* compGetNextExitScope(unsigned offs, bool scan = false); void compProcessScopesUntil(unsigned offset, VARSET_TP* inScope, void (Compiler::*enterScopeFn)(VARSET_TP* inScope, VarScopeDsc*), void (Compiler::*exitScopeFn)(VARSET_TP* inScope, VarScopeDsc*)); #ifdef DEBUG void compDispScopeLists(); #endif // DEBUG bool compIsProfilerHookNeeded(); //------------------------------------------------------------------------- /* Statistical Data Gathering */ void compJitStats(); // call this function and enable // various ifdef's below for statistical data #if CALL_ARG_STATS void compCallArgStats(); static void compDispCallArgStats(FILE* fout); #endif //------------------------------------------------------------------------- protected: #ifdef DEBUG bool skipMethod(); #endif ArenaAllocator* compArenaAllocator; public: void compFunctionTraceStart(); void compFunctionTraceEnd(void* methodCodePtr, ULONG methodCodeSize, bool isNYI); protected: size_t compMaxUncheckedOffsetForNullObject; void compInitOptions(JitFlags* compileFlags); void compSetProcessor(); void compInitDebuggingInfo(); void compSetOptimizationLevel(); #ifdef TARGET_ARMARCH bool compRsvdRegCheck(FrameLayoutState curState); #endif void compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags); // Clear annotations produced during optimizations; 
    // to be used between iterations when repeating opts.
    void ResetOptAnnotations();

    // Regenerate loop descriptors; to be used between iterations when repeating opts.
    void RecomputeLoopInfo();

#ifdef PROFILING_SUPPORTED
    // Data required for generating profiler Enter/Leave/TailCall hooks
    bool compProfilerHookNeeded; // Whether profiler Enter/Leave/TailCall hook needs to be generated for the method
    void* compProfilerMethHnd;   // Profiler handle of the method being compiled. Passed as param to ELT callbacks
    bool compProfilerMethHndIndirected; // Whether compProfilerHandle is pointer to the handle or is an actual handle
#endif

public:
    // Assumes being called as part of process shutdown; does any compiler-specific work associated with that.
    static void ProcessShutdownWork(ICorStaticInfo* statInfo);

    CompAllocator getAllocator(CompMemKind cmk = CMK_Generic)
    {
        return CompAllocator(compArenaAllocator, cmk);
    }

    CompAllocator getAllocatorGC()
    {
        return getAllocator(CMK_GC);
    }

    CompAllocator getAllocatorLoopHoist()
    {
        return getAllocator(CMK_LoopHoist);
    }

#ifdef DEBUG
    CompAllocator getAllocatorDebugOnly()
    {
        return getAllocator(CMK_DebugOnly);
    }
#endif // DEBUG

    /*
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XX                                                                           XX
    XX                           typeInfo                                        XX
    XX                                                                           XX
    XX   Checks for type compatibility and merges types                          XX
    XX                                                                           XX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    */

public:
    // Returns true if child is equal to or a subtype of parent for merge purposes.
    // This support is necessary to support attributes that are not described in,
    // for example, signatures. For example, the permanent home byref (a byref that
    // points to the gc heap) isn't a property of method signatures, therefore,
    // it is safe to have mismatches here (that tiCompatibleWith will not flag),
    // but when deciding if we need to reimport a block, we need to take these
    // into account.
    bool tiMergeCompatibleWith(const typeInfo& pChild, const typeInfo& pParent, bool normalisedForStack) const;

    // Returns true if child is equal to or a subtype of parent.
    // normalisedForStack indicates that both types are normalised for the stack.
    bool tiCompatibleWith(const typeInfo& pChild, const typeInfo& pParent, bool normalisedForStack) const;

    // Merges pDest and pSrc. Returns false if merge is undefined.
    // *pDest is modified to represent the merged type. Sets "*changed" to true
    // if this changes "*pDest".
    bool tiMergeToCommonParent(typeInfo* pDest, const typeInfo* pSrc, bool* changed) const;

    /*
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XX                                                                           XX
    XX                           IL verification stuff                           XX
    XX                                                                           XX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    */

public:
    // The following is used to track liveness of local variables, initialization
    // of valueclass constructors, and type safe use of IL instructions.

    // dynamic state info needed for verification
    EntryState verCurrentState;

    // this ptr of object type .ctors are considered initialized only after
    // the base class ctor is called, or an alternate ctor is called.
    // An uninited this ptr can be used to access fields, but cannot
    // be used to call a member function.
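    // Illustrative example: in a .ctor, "this.fld = 0" is verifiable before the base class .ctor
    // has run, but "this.SomeMethod()" is not until initialization has been observed.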
bool verTrackObjCtorInitState; void verInitBBEntryState(BasicBlock* block, EntryState* currentState); // Requires that "tis" is not TIS_Bottom -- it's a definite init/uninit state. void verSetThisInit(BasicBlock* block, ThisInitState tis); void verInitCurrentState(); void verResetCurrentState(BasicBlock* block, EntryState* currentState); // Merges the current verification state into the entry state of "block", return false if that merge fails, // TRUE if it succeeds. Further sets "*changed" to true if this changes the entry state of "block". bool verMergeEntryStates(BasicBlock* block, bool* changed); void verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg)); void verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg)); typeInfo verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef = false); // converts from jit type representation to typeInfo typeInfo verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd); // converts from jit type representation to typeInfo bool verIsSDArray(const typeInfo& ti); typeInfo verGetArrayElemType(const typeInfo& ti); typeInfo verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args); bool verIsByRefLike(const typeInfo& ti); bool verIsSafeToReturnByRef(const typeInfo& ti); // generic type variables range over types that satisfy IsBoxable bool verIsBoxable(const typeInfo& ti); void DECLSPEC_NORETURN verRaiseVerifyException(INDEBUG(const char* reason) DEBUGARG(const char* file) DEBUGARG(unsigned line)); void verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* reason) DEBUGARG(const char* file) DEBUGARG(unsigned line)); bool verCheckTailCallConstraint(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call // on a type parameter? bool speculative // If true, won't throw if verificatoin fails. Instead it will // return false to the caller. // If false, it will throw. ); bool verIsBoxedValueType(const typeInfo& ti); void verVerifyCall(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, bool tailCall, bool readonlyCall, // is this a "readonly." call? const BYTE* delegateCreateStart, const BYTE* codeAddr, CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName)); bool verCheckDelegateCreation(const BYTE* delegateCreateStart, const BYTE* codeAddr, mdMemberRef& targetMemberRef); typeInfo verVerifySTIND(const typeInfo& ptr, const typeInfo& value, const typeInfo& instrType); typeInfo verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType); void verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken, const CORINFO_FIELD_INFO& fieldInfo, const typeInfo* tiThis, bool mutator, bool allowPlainStructAsThis = false); void verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode); void verVerifyThisPtrInitialised(); bool verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target); #ifdef DEBUG // One line log function. Default level is 0. Increasing it gives you // more log information // levels are currently unused: #define JITDUMP(level,...) 
(); void JitLogEE(unsigned level, const char* fmt, ...); bool compDebugBreak; bool compJitHaltMethod(); #endif /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX GS Security checks for unsafe buffers XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: struct ShadowParamVarInfo { FixedBitVect* assignGroup; // the closure set of variables whose values depend on each other unsigned shadowCopy; // Lcl var num, if not valid set to BAD_VAR_NUM static bool mayNeedShadowCopy(LclVarDsc* varDsc) { #if defined(TARGET_AMD64) // GS cookie logic to create shadow slots, create trees to copy reg args to shadow // slots and update all trees to refer to shadow slots is done immediately after // fgMorph(). Lsra could potentially mark a param as DoNotEnregister after JIT determines // not to shadow a parameter. Also, LSRA could potentially spill a param which is passed // in register. Therefore, conservatively all params may need a shadow copy. Note that // GS cookie logic further checks whether the param is a ptr or an unsafe buffer before // creating a shadow slot even though this routine returns true. // // TODO-AMD64-CQ: Revisit this conservative approach as it could create more shadow slots than // required. There are two cases under which a reg arg could potentially be used from its // home location: // a) LSRA marks it as DoNotEnregister (see LinearScan::identifyCandidates()) // b) LSRA spills it // // Possible solution to address case (a) // - The conditions under which LSRA marks a varDsc as DoNotEnregister could be checked // in this routine. Note that live out of exception handler is something we may not be // able to do it here since GS cookie logic is invoked ahead of liveness computation. // Therefore, for methods with exception handling and need GS cookie check we might have // to take conservative approach. // // Possible solution to address case (b) // - Whenver a parameter passed in an argument register needs to be spilled by LSRA, we // create a new spill temp if the method needs GS cookie check. return varDsc->lvIsParam; #else // !defined(TARGET_AMD64) return varDsc->lvIsParam && !varDsc->lvIsRegArg; #endif } #ifdef DEBUG void Print() { printf("assignGroup [%p]; shadowCopy: [%d];\n", assignGroup, shadowCopy); } #endif }; GSCookie* gsGlobalSecurityCookieAddr; // Address of global cookie for unsafe buffer checks GSCookie gsGlobalSecurityCookieVal; // Value of global cookie if addr is NULL ShadowParamVarInfo* gsShadowVarInfo; // Table used by shadow param analysis code void gsGSChecksInitCookie(); // Grabs cookie variable void gsCopyShadowParams(); // Identify vulnerable params and create dhadow copies bool gsFindVulnerableParams(); // Shadow param analysis code void gsParamsToShadows(); // Insert copy code and replave param uses by shadow static fgWalkPreFn gsMarkPtrsAndAssignGroups; // Shadow param analysis tree-walk static fgWalkPreFn gsReplaceShadowParams; // Shadow param replacement tree-walk #define DEFAULT_MAX_INLINE_SIZE 100 // Methods with > DEFAULT_MAX_INLINE_SIZE IL bytes will never be inlined. // This can be overwritten by setting complus_JITInlineSize env variable. 
#define DEFAULT_MAX_INLINE_DEPTH 20 // Methods at more than this level deep will not be inlined #define DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE 32 // fixed locallocs of this size or smaller will convert to local buffers private: #ifdef FEATURE_JIT_METHOD_PERF JitTimer* pCompJitTimer; // Timer data structure (by phases) for current compilation. static CompTimeSummaryInfo s_compJitTimerSummary; // Summary of the Timer information for the whole run. static LPCWSTR JitTimeLogCsv(); // Retrieve the file name for CSV from ConfigDWORD. static LPCWSTR compJitTimeLogFilename; // If a log file for JIT time is desired, filename to write it to. #endif void BeginPhase(Phases phase); // Indicate the start of the given phase. void EndPhase(Phases phase); // Indicate the end of the given phase. #if MEASURE_CLRAPI_CALLS // Thin wrappers that call into JitTimer (if present). inline void CLRApiCallEnter(unsigned apix); inline void CLRApiCallLeave(unsigned apix); public: inline void CLR_API_Enter(API_ICorJitInfo_Names ename); inline void CLR_API_Leave(API_ICorJitInfo_Names ename); private: #endif #if defined(DEBUG) || defined(INLINE_DATA) // These variables are associated with maintaining SQM data about compile time. unsigned __int64 m_compCyclesAtEndOfInlining; // The thread-virtualized cycle count at the end of the inlining phase // in the current compilation. unsigned __int64 m_compCycles; // Net cycle count for current compilation DWORD m_compTickCountAtEndOfInlining; // The result of GetTickCount() (# ms since some epoch marker) at the end of // the inlining phase in the current compilation. #endif // defined(DEBUG) || defined(INLINE_DATA) // Records the SQM-relevant (cycles and tick count). Should be called after inlining is complete. // (We do this after inlining because this marks the last point at which the JIT is likely to cause // type-loading and class initialization). void RecordStateAtEndOfInlining(); // Assumes being called at the end of compilation. Update the SQM state. void RecordStateAtEndOfCompilation(); public: #if FUNC_INFO_LOGGING static LPCWSTR compJitFuncInfoFilename; // If a log file for per-function information is required, this is the // filename to write it to. static FILE* compJitFuncInfoFile; // And this is the actual FILE* to write to. #endif // FUNC_INFO_LOGGING Compiler* prevCompiler; // Previous compiler on stack for TLS Compiler* linked list for reentrant compilers. #if MEASURE_NOWAY void RecordNowayAssert(const char* filename, unsigned line, const char* condStr); #endif // MEASURE_NOWAY #ifndef FEATURE_TRACELOGGING // Should we actually fire the noway assert body and the exception handler? bool compShouldThrowOnNoway(); #else // FEATURE_TRACELOGGING // Should we actually fire the noway assert body and the exception handler? bool compShouldThrowOnNoway(const char* filename, unsigned line); // Telemetry instance to use per method compilation. JitTelemetry compJitTelemetry; // Get common parameters that have to be logged with most telemetry data. void compGetTelemetryDefaults(const char** assemblyName, const char** scopeName, const char** methodName, unsigned* methodHash); #endif // !FEATURE_TRACELOGGING #ifdef DEBUG private: NodeToTestDataMap* m_nodeTestData; static const unsigned FIRST_LOOP_HOIST_CSE_CLASS = 1000; unsigned m_loopHoistCSEClass; // LoopHoist test annotations turn into CSE requirements; we // label them with CSE Class #'s starting at FIRST_LOOP_HOIST_CSE_CLASS. // Current kept in this. 
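// Illustrative note, not part of the original source: BeginPhase/EndPhase above are meant to
// bracket each compilation phase so that, when FEATURE_JIT_METHOD_PERF is enabled, pCompJitTimer
// can attribute cycles to individual phases. A hypothetical call site might look like:
//
//   BeginPhase(PHASE_MORPH_GLOBAL);
//   fgMorphBlocks();
//   EndPhase(PHASE_MORPH_GLOBAL);
//
// This is only a sketch of the intended pairing, not a claim about where the calls actually live.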
public: NodeToTestDataMap* GetNodeTestData() { Compiler* compRoot = impInlineRoot(); if (compRoot->m_nodeTestData == nullptr) { compRoot->m_nodeTestData = new (getAllocatorDebugOnly()) NodeToTestDataMap(getAllocatorDebugOnly()); } return compRoot->m_nodeTestData; } typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, int> NodeToIntMap; // Returns the set (i.e., the domain of the result map) of nodes that are keys in m_nodeTestData, and // currently occur in the AST graph. NodeToIntMap* FindReachableNodesInNodeTestData(); // Node "from" is being eliminated, and being replaced by node "to". If "from" had any associated // test data, associate that data with "to". void TransferTestDataToNode(GenTree* from, GenTree* to); // These are the methods that test that the various conditions implied by the // test attributes are satisfied. void JitTestCheckSSA(); // SSA builder tests. void JitTestCheckVN(); // Value numbering tests. #endif // DEBUG // The "FieldSeqStore", for canonicalizing field sequences. See the definition of FieldSeqStore for // operations. FieldSeqStore* m_fieldSeqStore; FieldSeqStore* GetFieldSeqStore() { Compiler* compRoot = impInlineRoot(); if (compRoot->m_fieldSeqStore == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_FieldSeqStore, and use that for allocation. CompAllocator ialloc(getAllocator(CMK_FieldSeqStore)); compRoot->m_fieldSeqStore = new (ialloc) FieldSeqStore(ialloc); } return compRoot->m_fieldSeqStore; } typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, FieldSeqNode*> NodeToFieldSeqMap; // Some nodes of "TYP_BYREF" or "TYP_I_IMPL" actually represent the address of a field within a struct, but since // the offset of the field is zero, there's no "GT_ADD" node. We normally attach a field sequence to the constant // that is added, but what do we do when that constant is zero, and is thus not present? We use this mechanism to // attach the field sequence directly to the address node. NodeToFieldSeqMap* m_zeroOffsetFieldMap; NodeToFieldSeqMap* GetZeroOffsetFieldMap() { // Don't need to worry about inlining here if (m_zeroOffsetFieldMap == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_ZeroOffsetFieldMap, and use that for // allocation. CompAllocator ialloc(getAllocator(CMK_ZeroOffsetFieldMap)); m_zeroOffsetFieldMap = new (ialloc) NodeToFieldSeqMap(ialloc); } return m_zeroOffsetFieldMap; } // Requires that "op1" is a node of type "TYP_BYREF" or "TYP_I_IMPL". We are dereferencing this with the fields in // "fieldSeq", whose offsets are required all to be zero. Ensures that any field sequence annotation currently on // "op1" or its components is augmented by appending "fieldSeq". In practice, if "op1" is a GT_LCL_FLD, it has // a field sequence as a member; otherwise, it may be the addition of an a byref and a constant, where the const // has a field sequence -- in this case "fieldSeq" is appended to that of the constant; otherwise, we // record the the field sequence using the ZeroOffsetFieldMap described above. // // One exception above is that "op1" is a node of type "TYP_REF" where "op1" is a GT_LCL_VAR. // This happens when System.Object vtable pointer is a regular field at offset 0 in System.Private.CoreLib in // CoreRT. Such case is handled same as the default case. 
void fgAddFieldSeqForZeroOffset(GenTree* op1, FieldSeqNode* fieldSeq); typedef JitHashTable<const GenTree*, JitPtrKeyFuncs<GenTree>, ArrayInfo> NodeToArrayInfoMap; NodeToArrayInfoMap* m_arrayInfoMap; NodeToArrayInfoMap* GetArrayInfoMap() { Compiler* compRoot = impInlineRoot(); if (compRoot->m_arrayInfoMap == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_ArrayInfoMap, and use that for allocation. CompAllocator ialloc(getAllocator(CMK_ArrayInfoMap)); compRoot->m_arrayInfoMap = new (ialloc) NodeToArrayInfoMap(ialloc); } return compRoot->m_arrayInfoMap; } //----------------------------------------------------------------------------------------------------------------- // Compiler::TryGetArrayInfo: // Given an indirection node, checks to see whether or not that indirection represents an array access, and // if so returns information about the array. // // Arguments: // indir - The `GT_IND` node. // arrayInfo (out) - Information about the accessed array if this function returns true. Undefined otherwise. // // Returns: // True if the `GT_IND` node represents an array access; false otherwise. bool TryGetArrayInfo(GenTreeIndir* indir, ArrayInfo* arrayInfo) { if ((indir->gtFlags & GTF_IND_ARR_INDEX) == 0) { return false; } if (indir->gtOp1->OperIs(GT_INDEX_ADDR)) { GenTreeIndexAddr* const indexAddr = indir->gtOp1->AsIndexAddr(); *arrayInfo = ArrayInfo(indexAddr->gtElemType, indexAddr->gtElemSize, indexAddr->gtElemOffset, indexAddr->gtStructElemClass); return true; } bool found = GetArrayInfoMap()->Lookup(indir, arrayInfo); assert(found); return true; } NodeToUnsignedMap* m_memorySsaMap[MemoryKindCount]; // In some cases, we want to assign intermediate SSA #'s to memory states, and know what nodes create those memory // states. (We do this for try blocks, where, if the try block doesn't do a call that loses track of the memory // state, all the possible memory states are possible initial states of the corresponding catch block(s).) NodeToUnsignedMap* GetMemorySsaMap(MemoryKind memoryKind) { if (memoryKind == GcHeap && byrefStatesMatchGcHeapStates) { // Use the same map for GCHeap and ByrefExposed when their states match. memoryKind = ByrefExposed; } assert(memoryKind < MemoryKindCount); Compiler* compRoot = impInlineRoot(); if (compRoot->m_memorySsaMap[memoryKind] == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_ArrayInfoMap, and use that for allocation. CompAllocator ialloc(getAllocator(CMK_ArrayInfoMap)); compRoot->m_memorySsaMap[memoryKind] = new (ialloc) NodeToUnsignedMap(ialloc); } return compRoot->m_memorySsaMap[memoryKind]; } // The Refany type is the only struct type whose structure is implicitly assumed by IL. We need its fields. 
CORINFO_CLASS_HANDLE m_refAnyClass; CORINFO_FIELD_HANDLE GetRefanyDataField() { if (m_refAnyClass == nullptr) { m_refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF); } return info.compCompHnd->getFieldInClass(m_refAnyClass, 0); } CORINFO_FIELD_HANDLE GetRefanyTypeField() { if (m_refAnyClass == nullptr) { m_refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF); } return info.compCompHnd->getFieldInClass(m_refAnyClass, 1); } #if VARSET_COUNTOPS static BitSetSupport::BitSetOpCounter m_varsetOpCounter; #endif #if ALLVARSET_COUNTOPS static BitSetSupport::BitSetOpCounter m_allvarsetOpCounter; #endif static HelperCallProperties s_helperCallProperties; #ifdef UNIX_AMD64_ABI static var_types GetTypeFromClassificationAndSizes(SystemVClassificationType classType, int size); static var_types GetEightByteType(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, unsigned slotNum); static void GetStructTypeOffset(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, var_types* type0, var_types* type1, unsigned __int8* offset0, unsigned __int8* offset1); void GetStructTypeOffset(CORINFO_CLASS_HANDLE typeHnd, var_types* type0, var_types* type1, unsigned __int8* offset0, unsigned __int8* offset1); #endif // defined(UNIX_AMD64_ABI) void fgMorphMultiregStructArgs(GenTreeCall* call); GenTree* fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntryPtr); bool killGCRefs(GenTree* tree); }; // end of class Compiler //--------------------------------------------------------------------------------------------------------------------- // GenTreeVisitor: a flexible tree walker implemented using the curiously-recurring-template pattern. // // This class implements a configurable walker for IR trees. There are five configuration options (defaults values are // shown in parentheses): // // - ComputeStack (false): when true, the walker will push each node onto the `m_ancestors` stack. "Ancestors" is a bit // of a misnomer, as the first entry will always be the current node. // // - DoPreOrder (false): when true, the walker will invoke `TVisitor::PreOrderVisit` with the current node as an // argument before visiting the node's operands. // // - DoPostOrder (false): when true, the walker will invoke `TVisitor::PostOrderVisit` with the current node as an // argument after visiting the node's operands. // // - DoLclVarsOnly (false): when true, the walker will only invoke `TVisitor::PreOrderVisit` for lclVar nodes. // `DoPreOrder` must be true if this option is true. // // - UseExecutionOrder (false): when true, then walker will visit a node's operands in execution order (e.g. if a // binary operator has the `GTF_REVERSE_OPS` flag set, the second operand will be // visited before the first). // // At least one of `DoPreOrder` and `DoPostOrder` must be specified. 
// // A simple pre-order visitor might look something like the following: // // class CountingVisitor final : public GenTreeVisitor<CountingVisitor> // { // public: // enum // { // DoPreOrder = true // }; // // unsigned m_count; // // CountingVisitor(Compiler* compiler) // : GenTreeVisitor<CountingVisitor>(compiler), m_count(0) // { // } // // Compiler::fgWalkResult PreOrderVisit(GenTree* node) // { // m_count++; // } // }; // // This visitor would then be used like so: // // CountingVisitor countingVisitor(compiler); // countingVisitor.WalkTree(root); // template <typename TVisitor> class GenTreeVisitor { protected: typedef Compiler::fgWalkResult fgWalkResult; enum { ComputeStack = false, DoPreOrder = false, DoPostOrder = false, DoLclVarsOnly = false, UseExecutionOrder = false, }; Compiler* m_compiler; ArrayStack<GenTree*> m_ancestors; GenTreeVisitor(Compiler* compiler) : m_compiler(compiler), m_ancestors(compiler->getAllocator(CMK_ArrayStack)) { assert(compiler != nullptr); static_assert_no_msg(TVisitor::DoPreOrder || TVisitor::DoPostOrder); static_assert_no_msg(!TVisitor::DoLclVarsOnly || TVisitor::DoPreOrder); } fgWalkResult PreOrderVisit(GenTree** use, GenTree* user) { return fgWalkResult::WALK_CONTINUE; } fgWalkResult PostOrderVisit(GenTree** use, GenTree* user) { return fgWalkResult::WALK_CONTINUE; } public: fgWalkResult WalkTree(GenTree** use, GenTree* user) { assert(use != nullptr); GenTree* node = *use; if (TVisitor::ComputeStack) { m_ancestors.Push(node); } fgWalkResult result = fgWalkResult::WALK_CONTINUE; if (TVisitor::DoPreOrder && !TVisitor::DoLclVarsOnly) { result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user); if (result == fgWalkResult::WALK_ABORT) { return result; } node = *use; if ((node == nullptr) || (result == fgWalkResult::WALK_SKIP_SUBTREES)) { goto DONE; } } switch (node->OperGet()) { // Leaf lclVars case GT_LCL_VAR: case GT_LCL_FLD: case GT_LCL_VAR_ADDR: case GT_LCL_FLD_ADDR: if (TVisitor::DoLclVarsOnly) { result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user); if (result == fgWalkResult::WALK_ABORT) { return result; } } FALLTHROUGH; // Leaf nodes case GT_CATCH_ARG: case GT_LABEL: case GT_FTN_ADDR: case GT_RET_EXPR: case GT_CNS_INT: case GT_CNS_LNG: case GT_CNS_DBL: case GT_CNS_STR: case GT_MEMORYBARRIER: case GT_JMP: case GT_JCC: case GT_SETCC: case GT_NO_OP: case GT_START_NONGC: case GT_START_PREEMPTGC: case GT_PROF_HOOK: #if !defined(FEATURE_EH_FUNCLETS) case GT_END_LFIN: #endif // !FEATURE_EH_FUNCLETS case GT_PHI_ARG: case GT_JMPTABLE: case GT_CLS_VAR: case GT_CLS_VAR_ADDR: case GT_ARGPLACE: case GT_PHYSREG: case GT_EMITNOP: case GT_PINVOKE_PROLOG: case GT_PINVOKE_EPILOG: case GT_IL_OFFSET: break; // Lclvar unary operators case GT_STORE_LCL_VAR: case GT_STORE_LCL_FLD: if (TVisitor::DoLclVarsOnly) { result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user); if (result == fgWalkResult::WALK_ABORT) { return result; } } FALLTHROUGH; // Standard unary operators case GT_NOT: case GT_NEG: case GT_BSWAP: case GT_BSWAP16: case GT_COPY: case GT_RELOAD: case GT_ARR_LENGTH: case GT_CAST: case GT_BITCAST: case GT_CKFINITE: case GT_LCLHEAP: case GT_ADDR: case GT_IND: case GT_OBJ: case GT_BLK: case GT_BOX: case GT_ALLOCOBJ: case GT_INIT_VAL: case GT_JTRUE: case GT_SWITCH: case GT_NULLCHECK: case GT_PUTARG_REG: case GT_PUTARG_STK: case GT_PUTARG_TYPE: case GT_RETURNTRAP: case GT_NOP: case GT_FIELD: case GT_RETURN: case GT_RETFILT: case GT_RUNTIMELOOKUP: case GT_KEEPALIVE: case GT_INC_SATURATE: { GenTreeUnOp* const unOp = 
node->AsUnOp(); if (unOp->gtOp1 != nullptr) { result = WalkTree(&unOp->gtOp1, unOp); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } // Special nodes case GT_PHI: for (GenTreePhi::Use& use : node->AsPhi()->Uses()) { result = WalkTree(&use.NodeRef(), node); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; case GT_FIELD_LIST: for (GenTreeFieldList::Use& use : node->AsFieldList()->Uses()) { result = WalkTree(&use.NodeRef(), node); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; case GT_CMPXCHG: { GenTreeCmpXchg* const cmpXchg = node->AsCmpXchg(); result = WalkTree(&cmpXchg->gtOpLocation, cmpXchg); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&cmpXchg->gtOpValue, cmpXchg); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&cmpXchg->gtOpComparand, cmpXchg); if (result == fgWalkResult::WALK_ABORT) { return result; } break; } case GT_ARR_ELEM: { GenTreeArrElem* const arrElem = node->AsArrElem(); result = WalkTree(&arrElem->gtArrObj, arrElem); if (result == fgWalkResult::WALK_ABORT) { return result; } const unsigned rank = arrElem->gtArrRank; for (unsigned dim = 0; dim < rank; dim++) { result = WalkTree(&arrElem->gtArrInds[dim], arrElem); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } case GT_ARR_OFFSET: { GenTreeArrOffs* const arrOffs = node->AsArrOffs(); result = WalkTree(&arrOffs->gtOffset, arrOffs); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&arrOffs->gtIndex, arrOffs); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&arrOffs->gtArrObj, arrOffs); if (result == fgWalkResult::WALK_ABORT) { return result; } break; } case GT_STORE_DYN_BLK: { GenTreeStoreDynBlk* const dynBlock = node->AsStoreDynBlk(); GenTree** op1Use = &dynBlock->gtOp1; GenTree** op2Use = &dynBlock->gtOp2; GenTree** op3Use = &dynBlock->gtDynamicSize; result = WalkTree(op1Use, dynBlock); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(op2Use, dynBlock); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(op3Use, dynBlock); if (result == fgWalkResult::WALK_ABORT) { return result; } break; } case GT_CALL: { GenTreeCall* const call = node->AsCall(); if (call->gtCallThisArg != nullptr) { result = WalkTree(&call->gtCallThisArg->NodeRef(), call); if (result == fgWalkResult::WALK_ABORT) { return result; } } for (GenTreeCall::Use& use : call->Args()) { result = WalkTree(&use.NodeRef(), call); if (result == fgWalkResult::WALK_ABORT) { return result; } } for (GenTreeCall::Use& use : call->LateArgs()) { result = WalkTree(&use.NodeRef(), call); if (result == fgWalkResult::WALK_ABORT) { return result; } } if (call->gtCallType == CT_INDIRECT) { if (call->gtCallCookie != nullptr) { result = WalkTree(&call->gtCallCookie, call); if (result == fgWalkResult::WALK_ABORT) { return result; } } result = WalkTree(&call->gtCallAddr, call); if (result == fgWalkResult::WALK_ABORT) { return result; } } if (call->gtControlExpr != nullptr) { result = WalkTree(&call->gtControlExpr, call); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } #if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) #if defined(FEATURE_SIMD) case GT_SIMD: #endif #if defined(FEATURE_HW_INTRINSICS) case GT_HWINTRINSIC: #endif if (TVisitor::UseExecutionOrder && node->IsReverseOp()) { assert(node->AsMultiOp()->GetOperandCount() == 2); result = WalkTree(&node->AsMultiOp()->Op(2), node); if (result == 
fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&node->AsMultiOp()->Op(1), node); if (result == fgWalkResult::WALK_ABORT) { return result; } } else { for (GenTree** use : node->AsMultiOp()->UseEdges()) { result = WalkTree(use, node); if (result == fgWalkResult::WALK_ABORT) { return result; } } } break; #endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) // Binary nodes default: { assert(node->OperIsBinary()); GenTreeOp* const op = node->AsOp(); GenTree** op1Use = &op->gtOp1; GenTree** op2Use = &op->gtOp2; if (TVisitor::UseExecutionOrder && node->IsReverseOp()) { std::swap(op1Use, op2Use); } if (*op1Use != nullptr) { result = WalkTree(op1Use, op); if (result == fgWalkResult::WALK_ABORT) { return result; } } if (*op2Use != nullptr) { result = WalkTree(op2Use, op); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } } DONE: // Finally, visit the current node if (TVisitor::DoPostOrder) { result = reinterpret_cast<TVisitor*>(this)->PostOrderVisit(use, user); } if (TVisitor::ComputeStack) { m_ancestors.Pop(); } return result; } }; template <bool computeStack, bool doPreOrder, bool doPostOrder, bool doLclVarsOnly, bool useExecutionOrder> class GenericTreeWalker final : public GenTreeVisitor<GenericTreeWalker<computeStack, doPreOrder, doPostOrder, doLclVarsOnly, useExecutionOrder>> { public: enum { ComputeStack = computeStack, DoPreOrder = doPreOrder, DoPostOrder = doPostOrder, DoLclVarsOnly = doLclVarsOnly, UseExecutionOrder = useExecutionOrder, }; private: Compiler::fgWalkData* m_walkData; public: GenericTreeWalker(Compiler::fgWalkData* walkData) : GenTreeVisitor<GenericTreeWalker<computeStack, doPreOrder, doPostOrder, doLclVarsOnly, useExecutionOrder>>( walkData->compiler) , m_walkData(walkData) { assert(walkData != nullptr); if (computeStack) { walkData->parentStack = &this->m_ancestors; } } Compiler::fgWalkResult PreOrderVisit(GenTree** use, GenTree* user) { m_walkData->parent = user; return m_walkData->wtprVisitorFn(use, m_walkData); } Compiler::fgWalkResult PostOrderVisit(GenTree** use, GenTree* user) { m_walkData->parent = user; return m_walkData->wtpoVisitorFn(use, m_walkData); } }; // A dominator tree visitor implemented using the curiously-recurring-template pattern, similar to GenTreeVisitor. template <typename TVisitor> class DomTreeVisitor { protected: Compiler* const m_compiler; DomTreeNode* const m_domTree; DomTreeVisitor(Compiler* compiler, DomTreeNode* domTree) : m_compiler(compiler), m_domTree(domTree) { } void Begin() { } void PreOrderVisit(BasicBlock* block) { } void PostOrderVisit(BasicBlock* block) { } void End() { } public: //------------------------------------------------------------------------ // WalkTree: Walk the dominator tree, starting from fgFirstBB. // // Notes: // This performs a non-recursive, non-allocating walk of the tree by using // DomTreeNode's firstChild and nextSibling links to locate the children of // a node and BasicBlock's bbIDom parent link to go back up the tree when // no more children are left. // // Forests are also supported, provided that all the roots are chained via // DomTreeNode::nextSibling to fgFirstBB. 
// void WalkTree() { static_cast<TVisitor*>(this)->Begin(); for (BasicBlock *next, *block = m_compiler->fgFirstBB; block != nullptr; block = next) { static_cast<TVisitor*>(this)->PreOrderVisit(block); next = m_domTree[block->bbNum].firstChild; if (next != nullptr) { assert(next->bbIDom == block); continue; } do { static_cast<TVisitor*>(this)->PostOrderVisit(block); next = m_domTree[block->bbNum].nextSibling; if (next != nullptr) { assert(next->bbIDom == block->bbIDom); break; } block = block->bbIDom; } while (block != nullptr); } static_cast<TVisitor*>(this)->End(); } }; // EHClauses: adapter class for forward iteration of the exception handling table using range-based `for`, e.g.: // for (EHblkDsc* const ehDsc : EHClauses(compiler)) // class EHClauses { EHblkDsc* m_begin; EHblkDsc* m_end; // Forward iterator for the exception handling table entries. Iteration is in table order. // class iterator { EHblkDsc* m_ehDsc; public: iterator(EHblkDsc* ehDsc) : m_ehDsc(ehDsc) { } EHblkDsc* operator*() const { return m_ehDsc; } iterator& operator++() { ++m_ehDsc; return *this; } bool operator!=(const iterator& i) const { return m_ehDsc != i.m_ehDsc; } }; public: EHClauses(Compiler* comp) : m_begin(comp->compHndBBtab), m_end(comp->compHndBBtab + comp->compHndBBtabCount) { assert((m_begin != nullptr) || (m_begin == m_end)); } iterator begin() const { return iterator(m_begin); } iterator end() const { return iterator(m_end); } }; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Miscellaneous Compiler stuff XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ // Values used to mark the types a stack slot is used for const unsigned TYPE_REF_INT = 0x01; // slot used as a 32-bit int const unsigned TYPE_REF_LNG = 0x02; // slot used as a 64-bit long const unsigned TYPE_REF_FLT = 0x04; // slot used as a 32-bit float const unsigned TYPE_REF_DBL = 0x08; // slot used as a 64-bit float const unsigned TYPE_REF_PTR = 0x10; // slot used as a 32-bit pointer const unsigned TYPE_REF_BYR = 0x20; // slot used as a byref pointer const unsigned TYPE_REF_STC = 0x40; // slot used as a struct const unsigned TYPE_REF_TYPEMASK = 0x7F; // bits that represent the type // const unsigned TYPE_REF_ADDR_TAKEN = 0x80; // slots address was taken /***************************************************************************** * * Variables to keep track of total code amounts. */ #if DISPLAY_SIZES extern size_t grossVMsize; extern size_t grossNCsize; extern size_t totalNCsize; extern unsigned genMethodICnt; extern unsigned genMethodNCnt; extern size_t gcHeaderISize; extern size_t gcPtrMapISize; extern size_t gcHeaderNSize; extern size_t gcPtrMapNSize; #endif // DISPLAY_SIZES /***************************************************************************** * * Variables to keep track of basic block counts (more data on 1 BB methods) */ #if COUNT_BASIC_BLOCKS extern Histogram bbCntTable; extern Histogram bbOneBBSizeTable; #endif /***************************************************************************** * * Used by optFindNaturalLoops to gather statistical information such as * - total number of natural loops * - number of loops with 1, 2, ... 
exit conditions * - number of loops that have an iterator (for like) * - number of loops that have a constant iterator */ #if COUNT_LOOPS extern unsigned totalLoopMethods; // counts the total number of methods that have natural loops extern unsigned maxLoopsPerMethod; // counts the maximum number of loops a method has extern unsigned totalLoopOverflows; // # of methods that identified more loops than we can represent extern unsigned totalLoopCount; // counts the total number of natural loops extern unsigned totalUnnatLoopCount; // counts the total number of (not-necessarily natural) loops extern unsigned totalUnnatLoopOverflows; // # of methods that identified more unnatural loops than we can represent extern unsigned iterLoopCount; // counts the # of loops with an iterator (for like) extern unsigned constIterLoopCount; // counts the # of loops with a constant iterator (for like) extern bool hasMethodLoops; // flag to keep track if we already counted a method as having loops extern unsigned loopsThisMethod; // counts the number of loops in the current method extern bool loopOverflowThisMethod; // True if we exceeded the max # of loops in the method. extern Histogram loopCountTable; // Histogram of loop counts extern Histogram loopExitCountTable; // Histogram of loop exit counts #endif // COUNT_LOOPS /***************************************************************************** * variables to keep track of how many iterations we go in a dataflow pass */ #if DATAFLOW_ITER extern unsigned CSEiterCount; // counts the # of iteration for the CSE dataflow extern unsigned CFiterCount; // counts the # of iteration for the Const Folding dataflow #endif // DATAFLOW_ITER #if MEASURE_BLOCK_SIZE extern size_t genFlowNodeSize; extern size_t genFlowNodeCnt; #endif // MEASURE_BLOCK_SIZE #if MEASURE_NODE_SIZE struct NodeSizeStats { void Init() { genTreeNodeCnt = 0; genTreeNodeSize = 0; genTreeNodeActualSize = 0; } // Count of tree nodes allocated. unsigned __int64 genTreeNodeCnt; // The size we allocate. unsigned __int64 genTreeNodeSize; // The actual size of the node. Note that the actual size will likely be smaller // than the allocated size, but we sometimes use SetOper()/ChangeOper() to change // a smaller node to a larger one. TODO-Cleanup: add stats on // SetOper()/ChangeOper() usage to quantify this. unsigned __int64 genTreeNodeActualSize; }; extern NodeSizeStats genNodeSizeStats; // Total node size stats extern NodeSizeStats genNodeSizeStatsPerFunc; // Per-function node size stats extern Histogram genTreeNcntHist; extern Histogram genTreeNsizHist; #endif // MEASURE_NODE_SIZE /***************************************************************************** * Count fatal errors (including noway_asserts). 
*/ #if MEASURE_FATAL extern unsigned fatal_badCode; extern unsigned fatal_noWay; extern unsigned fatal_implLimitation; extern unsigned fatal_NOMEM; extern unsigned fatal_noWayAssertBody; #ifdef DEBUG extern unsigned fatal_noWayAssertBodyArgs; #endif // DEBUG extern unsigned fatal_NYI; #endif // MEASURE_FATAL /***************************************************************************** * Codegen */ #ifdef TARGET_XARCH const instruction INS_SHIFT_LEFT_LOGICAL = INS_shl; const instruction INS_SHIFT_RIGHT_LOGICAL = INS_shr; const instruction INS_SHIFT_RIGHT_ARITHM = INS_sar; const instruction INS_AND = INS_and; const instruction INS_OR = INS_or; const instruction INS_XOR = INS_xor; const instruction INS_NEG = INS_neg; const instruction INS_TEST = INS_test; const instruction INS_MUL = INS_imul; const instruction INS_SIGNED_DIVIDE = INS_idiv; const instruction INS_UNSIGNED_DIVIDE = INS_div; const instruction INS_BREAKPOINT = INS_int3; const instruction INS_ADDC = INS_adc; const instruction INS_SUBC = INS_sbb; const instruction INS_NOT = INS_not; #endif // TARGET_XARCH #ifdef TARGET_ARM const instruction INS_SHIFT_LEFT_LOGICAL = INS_lsl; const instruction INS_SHIFT_RIGHT_LOGICAL = INS_lsr; const instruction INS_SHIFT_RIGHT_ARITHM = INS_asr; const instruction INS_AND = INS_and; const instruction INS_OR = INS_orr; const instruction INS_XOR = INS_eor; const instruction INS_NEG = INS_rsb; const instruction INS_TEST = INS_tst; const instruction INS_MUL = INS_mul; const instruction INS_MULADD = INS_mla; const instruction INS_SIGNED_DIVIDE = INS_sdiv; const instruction INS_UNSIGNED_DIVIDE = INS_udiv; const instruction INS_BREAKPOINT = INS_bkpt; const instruction INS_ADDC = INS_adc; const instruction INS_SUBC = INS_sbc; const instruction INS_NOT = INS_mvn; const instruction INS_ABS = INS_vabs; const instruction INS_SQRT = INS_vsqrt; #endif // TARGET_ARM #ifdef TARGET_ARM64 const instruction INS_MULADD = INS_madd; inline const instruction INS_BREAKPOINT_osHelper() { // GDB needs the encoding of brk #0 // Windbg needs the encoding of brk #F000 return TargetOS::IsUnix ? INS_brk_unix : INS_brk_windows; } #define INS_BREAKPOINT INS_BREAKPOINT_osHelper() const instruction INS_ABS = INS_fabs; const instruction INS_SQRT = INS_fsqrt; #endif // TARGET_ARM64 /*****************************************************************************/ extern const BYTE genTypeSizes[]; extern const BYTE genTypeAlignments[]; extern const BYTE genTypeStSzs[]; extern const BYTE genActualTypes[]; /*****************************************************************************/ #ifdef DEBUG void dumpConvertedVarSet(Compiler* comp, VARSET_VALARG_TP vars); #endif // DEBUG #include "compiler.hpp" // All the shared inline functions /*****************************************************************************/ #endif //_COMPILER_H_ /*****************************************************************************/
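// Illustrative note, not part of the original source: the INS_* aliases defined above let
// target-independent codegen pick instructions without per-target #ifdefs. A hypothetical use
// (targetReg and shiftAmount are made-up locals; the emit helper is only assumed here):
//
//   instruction ins = INS_SHIFT_LEFT_LOGICAL; // INS_shl on xarch, INS_lsl on arm
//   GetEmitter()->emitIns_R_I(ins, EA_4BYTE, targetReg, shiftAmount);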
1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/coreclr/jit/fgdiagnostic.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "allocacheck.h" // for alloca // Flowgraph Check and Dump Support #ifdef DEBUG void Compiler::fgPrintEdgeWeights() { // Print out all of the edge weights for (BasicBlock* const bDst : Blocks()) { if (bDst->bbPreds != nullptr) { printf(" Edge weights into " FMT_BB " :", bDst->bbNum); for (flowList* const edge : bDst->PredEdges()) { BasicBlock* bSrc = edge->getBlock(); // This is the control flow edge (bSrc -> bDst) printf(FMT_BB " ", bSrc->bbNum); if (edge->edgeWeightMin() < BB_MAX_WEIGHT) { printf("(%f", edge->edgeWeightMin()); } else { printf("(MAX"); } if (edge->edgeWeightMin() != edge->edgeWeightMax()) { if (edge->edgeWeightMax() < BB_MAX_WEIGHT) { printf("..%f", edge->edgeWeightMax()); } else { printf("..MAX"); } } printf(")"); if (edge->flNext != nullptr) { printf(", "); } } printf("\n"); } } } #endif // DEBUG /***************************************************************************** * Check that the flow graph is really updated */ #ifdef DEBUG void Compiler::fgDebugCheckUpdate() { if (!compStressCompile(STRESS_CHK_FLOW_UPDATE, 30)) { return; } /* We check for these conditions: * no unreachable blocks -> no blocks have countOfInEdges() = 0 * no empty blocks -> !block->isEmpty(), unless non-removable or multiple in-edges * no un-imported blocks -> no blocks have BBF_IMPORTED not set (this is * kind of redundand with the above, but to make sure) * no un-compacted blocks -> BBJ_NONE followed by block with no jumps to it (countOfInEdges() = 1) */ BasicBlock* prev; BasicBlock* block; for (prev = nullptr, block = fgFirstBB; block != nullptr; prev = block, block = block->bbNext) { /* no unreachable blocks */ if ((block->countOfInEdges() == 0) && !(block->bbFlags & BBF_DONT_REMOVE) #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // With funclets, we never get rid of the BBJ_ALWAYS part of a BBJ_CALLFINALLY/BBJ_ALWAYS pair, // even if we can prove that the finally block never returns. 
&& !block->isBBCallAlwaysPairTail() #endif // FEATURE_EH_FUNCLETS ) { noway_assert(!"Unreachable block not removed!"); } /* no empty blocks */ if (block->isEmpty() && !(block->bbFlags & BBF_DONT_REMOVE)) { switch (block->bbJumpKind) { case BBJ_CALLFINALLY: case BBJ_EHFINALLYRET: case BBJ_EHFILTERRET: case BBJ_RETURN: /* for BBJ_ALWAYS is probably just a GOTO, but will have to be treated */ case BBJ_ALWAYS: case BBJ_EHCATCHRET: /* These jump kinds are allowed to have empty tree lists */ break; default: /* it may be the case that the block had more than one reference to it * so we couldn't remove it */ if (block->countOfInEdges() == 0) { noway_assert(!"Empty block not removed!"); } break; } } /* no un-imported blocks */ if (!(block->bbFlags & BBF_IMPORTED)) { /* internal blocks do not count */ if (!(block->bbFlags & BBF_INTERNAL)) { noway_assert(!"Non IMPORTED block not removed!"); } } bool prevIsCallAlwaysPair = block->isBBCallAlwaysPairTail(); // Check for an unnecessary jumps to the next block bool doAssertOnJumpToNextBlock = false; // unless we have a BBJ_COND or BBJ_ALWAYS we can not assert if (block->bbJumpKind == BBJ_COND) { // A conditional branch should never jump to the next block // as it can be folded into a BBJ_NONE; doAssertOnJumpToNextBlock = true; } else if (block->bbJumpKind == BBJ_ALWAYS) { // Generally we will want to assert if a BBJ_ALWAYS branches to the next block doAssertOnJumpToNextBlock = true; // If the BBF_KEEP_BBJ_ALWAYS flag is set we allow it to jump to the next block if (block->bbFlags & BBF_KEEP_BBJ_ALWAYS) { doAssertOnJumpToNextBlock = false; } // A call/always pair is also allowed to jump to the next block if (prevIsCallAlwaysPair) { doAssertOnJumpToNextBlock = false; } // We are allowed to have a branch from a hot 'block' to a cold 'bbNext' // if ((block->bbNext != nullptr) && fgInDifferentRegions(block, block->bbNext)) { doAssertOnJumpToNextBlock = false; } } if (doAssertOnJumpToNextBlock) { if (block->bbJumpDest == block->bbNext) { noway_assert(!"Unnecessary jump to the next block!"); } } /* Make sure BBF_KEEP_BBJ_ALWAYS is set correctly */ if ((block->bbJumpKind == BBJ_ALWAYS) && prevIsCallAlwaysPair) { noway_assert(block->bbFlags & BBF_KEEP_BBJ_ALWAYS); } /* For a BBJ_CALLFINALLY block we make sure that we are followed by */ /* an BBJ_ALWAYS block with BBF_INTERNAL set */ /* or that it's a BBF_RETLESS_CALL */ if (block->bbJumpKind == BBJ_CALLFINALLY) { assert((block->bbFlags & BBF_RETLESS_CALL) || block->isBBCallAlwaysPair()); } /* no un-compacted blocks */ if (fgCanCompactBlocks(block, block->bbNext)) { noway_assert(!"Found un-compacted blocks!"); } } } #endif // DEBUG #if DUMP_FLOWGRAPHS struct escapeMapping_t { char ch; const char* sub; }; // clang-format off static escapeMapping_t s_EscapeFileMapping[] = { {':', "="}, {'<', "["}, {'>', "]"}, {';', "~semi~"}, {'|', "~bar~"}, {'&', "~amp~"}, {'"', "~quot~"}, {'*', "~star~"}, {0, nullptr} }; static escapeMapping_t s_EscapeMapping[] = { {'<', "&lt;"}, {'>', "&gt;"}, {'&', "&amp;"}, {'"', "&quot;"}, {0, nullptr} }; // clang-format on const char* Compiler::fgProcessEscapes(const char* nameIn, escapeMapping_t* map) { const char* nameOut = nameIn; unsigned lengthOut; unsigned index; bool match; bool subsitutionRequired; const char* pChar; lengthOut = 1; subsitutionRequired = false; pChar = nameIn; while (*pChar != '\0') { match = false; index = 0; while (map[index].ch != 0) { if (*pChar == map[index].ch) { match = true; break; } index++; } if (match) { subsitutionRequired = true; lengthOut += 
(unsigned)strlen(map[index].sub); } else { lengthOut += 1; } pChar++; } if (subsitutionRequired) { char* newName = getAllocator(CMK_DebugOnly).allocate<char>(lengthOut); char* pDest; pDest = newName; pChar = nameIn; while (*pChar != '\0') { match = false; index = 0; while (map[index].ch != 0) { if (*pChar == map[index].ch) { match = true; break; } index++; } if (match) { strcpy(pDest, map[index].sub); pDest += strlen(map[index].sub); } else { *pDest++ = *pChar; } pChar++; } *pDest++ = '\0'; nameOut = (const char*)newName; } return nameOut; } static void fprintfDouble(FILE* fgxFile, double value) { assert(value >= 0.0); if ((value >= 0.010) || (value == 0.0)) { fprintf(fgxFile, "\"%7.3f\"", value); } else if (value >= 0.00010) { fprintf(fgxFile, "\"%7.5f\"", value); } else { fprintf(fgxFile, "\"%7E\"", value); } } //------------------------------------------------------------------------ // fgDumpTree: Dump a tree into the DOT file. Used to provide a very short, one-line, // visualization of a BBJ_COND block. // // Arguments: // fgxFile - The file we are writing to. // tree - The operand to dump. // // static void Compiler::fgDumpTree(FILE* fgxFile, GenTree* const tree) { if (tree->OperIsCompare()) { // Want to generate something like: // V01 <= 7 // V01 > V02 const char* opName = GenTree::OpName(tree->OperGet()); // Make it look nicer if we can switch (tree->OperGet()) { case GT_EQ: opName = "=="; break; case GT_NE: opName = "!="; break; case GT_LT: opName = "<"; break; case GT_LE: opName = "<="; break; case GT_GE: opName = ">="; break; case GT_GT: opName = ">"; break; default: break; } GenTree* const lhs = tree->AsOp()->gtOp1; GenTree* const rhs = tree->AsOp()->gtOp2; fgDumpTree(fgxFile, lhs); fprintf(fgxFile, " %s ", opName); fgDumpTree(fgxFile, rhs); } else if (tree->IsCnsIntOrI()) { fprintf(fgxFile, "%d", tree->AsIntCon()->gtIconVal); } else if (tree->IsCnsFltOrDbl()) { fprintf(fgxFile, "%g", tree->AsDblCon()->gtDconVal); } else if (tree->IsLocal()) { fprintf(fgxFile, "V%02u", tree->AsLclVarCommon()->GetLclNum()); } else if (tree->OperIs(GT_ARR_LENGTH)) { GenTreeArrLen* arrLen = tree->AsArrLen(); GenTree* arr = arrLen->ArrRef(); fgDumpTree(fgxFile, arr); fprintf(fgxFile, ".Length"); } else { fprintf(fgxFile, "[%s]", GenTree::OpName(tree->OperGet())); } } //------------------------------------------------------------------------ // fgOpenFlowGraphFile: Open a file to dump either the xml or dot format flow graph // // Arguments: // wbDontClose - A boolean out argument that indicates whether the caller should close the file // phase - A phase identifier to indicate which phase is associated with the dump // pos - Are we being called to dump the flow graph pre-phase or post-phase? // type - A (wide) string indicating the type of dump, "dot" or "xml" // // Notes: // The filename to use to write the data comes from the COMPlus_JitDumpFgFile or COMPlus_NgenDumpFgFile // configuration. If unset, use "default". The "type" argument is used as a filename extension, // e.g., "default.dot". // // There are several "special" filenames recognized: // "profiled" -- only create graphs for methods with profile info, one file per method. // "hot" -- only create graphs for the hot region, one file per method. // "cold" -- only create graphs for the cold region, one file per method. // "jit" -- only create graphs for JITing, one file per method. // "all" -- create graphs for all regions, one file per method. // "stdout" -- output to stdout, not a file. // "stderr" -- output to stderr, not a file. 
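// Illustrative usage, not part of the original comment: to dump one .dot file per method that has
// profile data, a typical environment setup (method name "Main" is just a placeholder) would be:
//
//   set COMPlus_JitDumpFg=Main
//   set COMPlus_JitDumpFgDir=c:\fgdumps
//   set COMPlus_JitDumpFgFile=profiled
//
// where "profiled", "hot", "cold", "jit" and "all" are the special filenames listed above.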
// // Return Value: // Opens a file to which a flowgraph can be dumped, whose name is based on the current // config vales. FILE* Compiler::fgOpenFlowGraphFile(bool* wbDontClose, Phases phase, PhasePosition pos, LPCWSTR type) { FILE* fgxFile; LPCWSTR prePhasePattern = nullptr; // pre-phase: default (used in Release) is no pre-phase dump LPCWSTR postPhasePattern = W("*"); // post-phase: default (used in Release) is dump all phases bool dumpFunction = true; // default (used in Release) is always dump LPCWSTR filename = nullptr; LPCWSTR pathname = nullptr; const char* escapedString; bool createDuplicateFgxFiles = true; if (fgBBcount <= 1) { return nullptr; } #ifdef DEBUG if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { dumpFunction = JitConfig.NgenDumpFg().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args); filename = JitConfig.NgenDumpFgFile(); pathname = JitConfig.NgenDumpFgDir(); } else { dumpFunction = JitConfig.JitDumpFg().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args); filename = JitConfig.JitDumpFgFile(); pathname = JitConfig.JitDumpFgDir(); } prePhasePattern = JitConfig.JitDumpFgPrePhase(); postPhasePattern = JitConfig.JitDumpFgPhase(); #endif // DEBUG if (!dumpFunction) { return nullptr; } LPCWSTR phaseName = PhaseShortNames[phase]; if (pos == PhasePosition::PrePhase) { if (prePhasePattern == nullptr) { // If pre-phase pattern is not specified, then don't dump for any pre-phase. return nullptr; } else if (*prePhasePattern != W('*')) { if (wcsstr(prePhasePattern, phaseName) == nullptr) { return nullptr; } } } else { assert(pos == PhasePosition::PostPhase); if (postPhasePattern == nullptr) { // There's no post-phase pattern specified. If there is a pre-phase pattern specified, then that will // be the only set of phases dumped. If neither are specified, then post-phase dump after // PHASE_DETERMINE_FIRST_COLD_BLOCK. 
if (prePhasePattern != nullptr) { return nullptr; } if (phase != PHASE_DETERMINE_FIRST_COLD_BLOCK) { return nullptr; } } else if (*postPhasePattern != W('*')) { if (wcsstr(postPhasePattern, phaseName) == nullptr) { return nullptr; } } } if (filename == nullptr) { filename = W("default"); } if (wcscmp(filename, W("profiled")) == 0) { if (fgFirstBB->hasProfileWeight()) { createDuplicateFgxFiles = true; goto ONE_FILE_PER_METHOD; } else { return nullptr; } } if (wcscmp(filename, W("hot")) == 0) { if (info.compMethodInfo->regionKind == CORINFO_REGION_HOT) { createDuplicateFgxFiles = true; goto ONE_FILE_PER_METHOD; } else { return nullptr; } } else if (wcscmp(filename, W("cold")) == 0) { if (info.compMethodInfo->regionKind == CORINFO_REGION_COLD) { createDuplicateFgxFiles = true; goto ONE_FILE_PER_METHOD; } else { return nullptr; } } else if (wcscmp(filename, W("jit")) == 0) { if (info.compMethodInfo->regionKind == CORINFO_REGION_JIT) { createDuplicateFgxFiles = true; goto ONE_FILE_PER_METHOD; } else { return nullptr; } } else if (wcscmp(filename, W("all")) == 0) { createDuplicateFgxFiles = true; ONE_FILE_PER_METHOD:; escapedString = fgProcessEscapes(info.compFullName, s_EscapeFileMapping); const char* tierName = compGetTieringName(true); size_t wCharCount = strlen(escapedString) + wcslen(phaseName) + 1 + strlen("~999") + wcslen(type) + strlen(tierName) + 1; if (pathname != nullptr) { wCharCount += wcslen(pathname) + 1; } filename = (LPCWSTR)_alloca(wCharCount * sizeof(WCHAR)); if (pathname != nullptr) { swprintf_s((LPWSTR)filename, wCharCount, W("%s\\%S-%s-%S.%s"), pathname, escapedString, phaseName, tierName, type); } else { swprintf_s((LPWSTR)filename, wCharCount, W("%S.%s"), escapedString, type); } fgxFile = _wfopen(filename, W("r")); // Check if this file already exists if (fgxFile != nullptr) { // For Generic methods we will have both hot and cold versions if (createDuplicateFgxFiles == false) { fclose(fgxFile); return nullptr; } // Yes, this filename already exists, so create a different one by appending ~2, ~3, etc... for (int i = 2; i < 1000; i++) { fclose(fgxFile); if (pathname != nullptr) { swprintf_s((LPWSTR)filename, wCharCount, W("%s\\%S~%d.%s"), pathname, escapedString, i, type); } else { swprintf_s((LPWSTR)filename, wCharCount, W("%S~%d.%s"), escapedString, i, type); } fgxFile = _wfopen(filename, W("r")); // Check if this file exists if (fgxFile == nullptr) { break; } } // If we have already created 1000 files with this name then just fail if (fgxFile != nullptr) { fclose(fgxFile); return nullptr; } } fgxFile = _wfopen(filename, W("a+")); *wbDontClose = false; } else if (wcscmp(filename, W("stdout")) == 0) { fgxFile = jitstdout; *wbDontClose = true; } else if (wcscmp(filename, W("stderr")) == 0) { fgxFile = stderr; *wbDontClose = true; } else { LPCWSTR origFilename = filename; size_t wCharCount = wcslen(origFilename) + wcslen(type) + 2; if (pathname != nullptr) { wCharCount += wcslen(pathname) + 1; } filename = (LPCWSTR)_alloca(wCharCount * sizeof(WCHAR)); if (pathname != nullptr) { swprintf_s((LPWSTR)filename, wCharCount, W("%s\\%s.%s"), pathname, origFilename, type); } else { swprintf_s((LPWSTR)filename, wCharCount, W("%s.%s"), origFilename, type); } fgxFile = _wfopen(filename, W("a+")); *wbDontClose = false; } return fgxFile; } //------------------------------------------------------------------------ // fgDumpFlowGraph: Dump the xml or dot format flow graph, if enabled for this phase. 
// // Arguments: // phase - A phase identifier to indicate which phase is associated with the dump, // i.e. which phase has just completed. // pos - Are we being called to dump the flow graph pre-phase or post-phase? // // Return Value: // True iff a flowgraph has been dumped. // // Notes: // The xml dumps are the historical mechanism for dumping the flowgraph. // The dot format can be viewed by: // - https://sketchviz.com/ // - Graphviz (http://www.graphviz.org/) // - The command: // "C:\Program Files (x86)\Graphviz2.38\bin\dot.exe" -Tsvg -oFoo.svg -Kdot Foo.dot // will produce a Foo.svg file that can be opened with any svg-capable browser. // - http://rise4fun.com/Agl/ // - Cut and paste the graph from your .dot file, replacing the digraph on the page, and then click the play // button. // - It will show a rotating '/' and then render the graph in the browser. // MSAGL has also been open-sourced to https://github.com/Microsoft/automatic-graph-layout. // // Here are the config values that control it: // COMPlus_JitDumpFg A string (ala the COMPlus_JitDump string) indicating what methods to dump // flowgraphs for. // COMPlus_JitDumpFgDir A path to a directory into which the flowgraphs will be dumped. // COMPlus_JitDumpFgFile The filename to use. The default is "default.[xml|dot]". // Note that the new graphs will be appended to this file if it already exists. // COMPlus_NgenDumpFg Same as COMPlus_JitDumpFg, but for ngen compiles. // COMPlus_NgenDumpFgDir Same as COMPlus_JitDumpFgDir, but for ngen compiles. // COMPlus_NgenDumpFgFile Same as COMPlus_JitDumpFgFile, but for ngen compiles. // COMPlus_JitDumpFgPhase Phase(s) after which to dump the flowgraph. // Set to the short name of a phase to see the flowgraph after that phase. // Leave unset to dump after COLD-BLK (determine first cold block) or set to * // for all phases. // COMPlus_JitDumpFgPrePhase Phase(s) before which to dump the flowgraph. // COMPlus_JitDumpFgDot 0 for xml format, non-zero for dot format. (Default is dot format.) // COMPlus_JitDumpFgEH (dot only) 0 for no exception-handling information; non-zero to include // exception-handling regions. // COMPlus_JitDumpFgLoops (dot only) 0 for no loop information; non-zero to include loop regions. // COMPlus_JitDumpFgConstrained (dot only) 0 == don't constrain to mostly linear layout; non-zero == force // mostly lexical block linear layout. // COMPlus_JitDumpFgBlockId Display blocks with block ID, not just bbNum. // // Example: // // If you want to dump just before and after a single phase, say loop cloning, use: // set COMPlus_JitDumpFgPhase=LP-CLONE // set COMPlus_JitDumpFgPrePhase=LP-CLONE // bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) { bool result = false; bool dontClose = false; #ifdef DEBUG const bool createDotFile = JitConfig.JitDumpFgDot() != 0; const bool includeEH = (JitConfig.JitDumpFgEH() != 0) && !compIsForInlining(); // The loop table is not well maintained after the optimization phases, but there is no single point at which // it is declared invalid. For now, refuse to add loop information starting at the rationalize phase, to // avoid asserts. 
const bool includeLoops = (JitConfig.JitDumpFgLoops() != 0) && !compIsForInlining() && (phase < PHASE_RATIONALIZE); const bool constrained = JitConfig.JitDumpFgConstrained() != 0; const bool useBlockId = JitConfig.JitDumpFgBlockID() != 0; const bool displayBlockFlags = JitConfig.JitDumpFgBlockFlags() != 0; #else // !DEBUG const bool createDotFile = true; const bool includeEH = false; const bool includeLoops = false; const bool constrained = true; const bool useBlockId = false; const bool displayBlockFlags = false; #endif // !DEBUG FILE* fgxFile = fgOpenFlowGraphFile(&dontClose, phase, pos, createDotFile ? W("dot") : W("fgx")); if (fgxFile == nullptr) { return false; } JITDUMP("Dumping flow graph %s phase %s\n", (pos == PhasePosition::PrePhase) ? "before" : "after", PhaseNames[phase]); bool validWeights = fgHaveValidEdgeWeights; double weightDivisor = (double)BasicBlock::getCalledCount(this); const char* escapedString; const char* regionString = "NONE"; if (info.compMethodInfo->regionKind == CORINFO_REGION_HOT) { regionString = "HOT"; } else if (info.compMethodInfo->regionKind == CORINFO_REGION_COLD) { regionString = "COLD"; } else if (info.compMethodInfo->regionKind == CORINFO_REGION_JIT) { regionString = "JIT"; } if (createDotFile) { fprintf(fgxFile, "digraph FlowGraph {\n"); fprintf(fgxFile, " graph [label = \"%s%s\\n%s\\n%s\"];\n", info.compMethodName, compIsForInlining() ? "\\n(inlinee)" : "", (pos == PhasePosition::PrePhase) ? "before" : "after", PhaseNames[phase]); fprintf(fgxFile, " node [shape = \"Box\"];\n"); } else { fprintf(fgxFile, "<method"); escapedString = fgProcessEscapes(info.compFullName, s_EscapeMapping); fprintf(fgxFile, "\n name=\"%s\"", escapedString); escapedString = fgProcessEscapes(info.compClassName, s_EscapeMapping); fprintf(fgxFile, "\n className=\"%s\"", escapedString); escapedString = fgProcessEscapes(info.compMethodName, s_EscapeMapping); fprintf(fgxFile, "\n methodName=\"%s\"", escapedString); fprintf(fgxFile, "\n ngenRegion=\"%s\"", regionString); fprintf(fgxFile, "\n bytesOfIL=\"%d\"", info.compILCodeSize); fprintf(fgxFile, "\n localVarCount=\"%d\"", lvaCount); if (fgHaveProfileData()) { fprintf(fgxFile, "\n calledCount=\"%f\"", fgCalledCount); fprintf(fgxFile, "\n profileData=\"true\""); } if (compHndBBtabCount > 0) { fprintf(fgxFile, "\n hasEHRegions=\"true\""); } if (fgHasLoops) { fprintf(fgxFile, "\n hasLoops=\"true\""); } if (validWeights) { fprintf(fgxFile, "\n validEdgeWeights=\"true\""); if (!fgSlopUsedInEdgeWeights && !fgRangeUsedInEdgeWeights) { fprintf(fgxFile, "\n exactEdgeWeights=\"true\""); } } if (fgFirstColdBlock != nullptr) { fprintf(fgxFile, "\n firstColdBlock=\"%d\"", fgFirstColdBlock->bbNum); } fprintf(fgxFile, ">"); fprintf(fgxFile, "\n <blocks"); fprintf(fgxFile, "\n blockCount=\"%d\"", fgBBcount); fprintf(fgxFile, ">"); } // In some cases, we want to change the display based on whether an edge is lexically backwards, forwards, // or lexical successor. Also, for the region tree, using the lexical order is useful for determining where // to insert in the tree, to determine nesting. We'd like to use the bbNum to do this. However, we don't // want to renumber the blocks. So, create a mapping of bbNum to ordinal, and compare block order by // comparing the mapped ordinals instead. // // For inlinees, the max block number of the inliner is used, so we need to allocate the block map based on // that size, even though it means allocating a block map possibly much bigger than what's required for just // the inlinee blocks. 
unsigned blkMapSize = 1 + impInlineRoot()->fgBBNumMax; unsigned blockOrdinal = 1; unsigned* blkMap = new (this, CMK_DebugOnly) unsigned[blkMapSize]; memset(blkMap, 0, sizeof(unsigned) * blkMapSize); for (BasicBlock* const block : Blocks()) { assert(block->bbNum < blkMapSize); blkMap[block->bbNum] = blockOrdinal++; } static const char* kindImage[] = {"EHFINALLYRET", "EHFILTERRET", "EHCATCHRET", "THROW", "RETURN", "NONE", "ALWAYS", "LEAVE", "CALLFINALLY", "COND", "SWITCH"}; BasicBlock* block; for (block = fgFirstBB, blockOrdinal = 1; block != nullptr; block = block->bbNext, blockOrdinal++) { if (createDotFile) { fprintf(fgxFile, " " FMT_BB " [label = \"", block->bbNum); if (useBlockId) { fprintf(fgxFile, "%s", block->dspToString()); } else { fprintf(fgxFile, FMT_BB, block->bbNum); } if (displayBlockFlags) { // Don't display the `[` `]` unless we're going to display something. const BasicBlockFlags allDisplayedBlockFlags = BBF_TRY_BEG | BBF_FUNCLET_BEG | BBF_RUN_RARELY | BBF_LOOP_HEAD | BBF_LOOP_PREHEADER | BBF_LOOP_ALIGN; if (block->bbFlags & allDisplayedBlockFlags) { // Display a very few, useful, block flags fprintf(fgxFile, " ["); if (block->bbFlags & BBF_TRY_BEG) { fprintf(fgxFile, "T"); } if (block->bbFlags & BBF_FUNCLET_BEG) { fprintf(fgxFile, "F"); } if (block->bbFlags & BBF_RUN_RARELY) { fprintf(fgxFile, "R"); } if (block->bbFlags & BBF_LOOP_HEAD) { fprintf(fgxFile, "L"); } if (block->bbFlags & BBF_LOOP_PREHEADER) { fprintf(fgxFile, "P"); } if (block->bbFlags & BBF_LOOP_ALIGN) { fprintf(fgxFile, "A"); } fprintf(fgxFile, "]"); } } if (block->bbJumpKind == BBJ_COND) { fprintf(fgxFile, "\\n"); // Include a line with the basics of the branch condition, if possible. // Find the loop termination test at the bottom of the loop. Statement* condStmt = block->lastStmt(); if (condStmt != nullptr) { GenTree* const condTree = condStmt->GetRootNode(); noway_assert(condTree->gtOper == GT_JTRUE); GenTree* const compareTree = condTree->AsOp()->gtOp1; fgDumpTree(fgxFile, compareTree); } } // "Raw" Profile weight if (block->hasProfileWeight()) { fprintf(fgxFile, "\\n\\n%7.2f", ((double)block->getBBWeight(this)) / BB_UNITY_WEIGHT); } // end of block label fprintf(fgxFile, "\""); // other node attributes // if (block == fgFirstBB) { fprintf(fgxFile, ", shape = \"house\""); } else if (block->bbJumpKind == BBJ_RETURN) { fprintf(fgxFile, ", shape = \"invhouse\""); } else if (block->bbJumpKind == BBJ_THROW) { fprintf(fgxFile, ", shape = \"trapezium\""); } else if (block->bbFlags & BBF_INTERNAL) { fprintf(fgxFile, ", shape = \"note\""); } fprintf(fgxFile, "];\n"); } else { fprintf(fgxFile, "\n <block"); fprintf(fgxFile, "\n id=\"%d\"", block->bbNum); fprintf(fgxFile, "\n ordinal=\"%d\"", blockOrdinal); fprintf(fgxFile, "\n jumpKind=\"%s\"", kindImage[block->bbJumpKind]); if (block->hasTryIndex()) { fprintf(fgxFile, "\n inTry=\"%s\"", "true"); } if (block->hasHndIndex()) { fprintf(fgxFile, "\n inHandler=\"%s\"", "true"); } if ((fgFirstBB->hasProfileWeight()) && ((block->bbFlags & BBF_COLD) == 0)) { fprintf(fgxFile, "\n hot=\"true\""); } if (block->bbFlags & (BBF_HAS_NEWOBJ | BBF_HAS_NEWARRAY)) { fprintf(fgxFile, "\n callsNew=\"true\""); } if (block->bbFlags & BBF_LOOP_HEAD) { fprintf(fgxFile, "\n loopHead=\"true\""); } const char* rootTreeOpName = "n/a"; if (block->IsLIR() || (block->lastStmt() != nullptr)) { if (block->lastNode() != nullptr) { rootTreeOpName = GenTree::OpName(block->lastNode()->OperGet()); } } fprintf(fgxFile, "\n weight="); fprintfDouble(fgxFile, ((double)block->bbWeight) / weightDivisor); 
// fgGetCodeEstimate() will assert if the costs have not yet been initialized. // fprintf(fgxFile, "\n codeEstimate=\"%d\"", fgGetCodeEstimate(block)); fprintf(fgxFile, "\n startOffset=\"%d\"", block->bbCodeOffs); fprintf(fgxFile, "\n rootTreeOp=\"%s\"", rootTreeOpName); fprintf(fgxFile, "\n endOffset=\"%d\"", block->bbCodeOffsEnd); fprintf(fgxFile, ">"); fprintf(fgxFile, "\n </block>"); } } if (!createDotFile) { fprintf(fgxFile, "\n </blocks>"); fprintf(fgxFile, "\n <edges"); fprintf(fgxFile, "\n edgeCount=\"%d\"", fgEdgeCount); fprintf(fgxFile, ">"); } if (fgComputePredsDone) { unsigned edgeNum = 1; BasicBlock* bTarget; for (bTarget = fgFirstBB; bTarget != nullptr; bTarget = bTarget->bbNext) { double targetWeightDivisor; if (bTarget->bbWeight == BB_ZERO_WEIGHT) { targetWeightDivisor = 1.0; } else { targetWeightDivisor = (double)bTarget->bbWeight; } for (flowList* const edge : bTarget->PredEdges()) { BasicBlock* bSource = edge->getBlock(); double sourceWeightDivisor; if (bSource->bbWeight == BB_ZERO_WEIGHT) { sourceWeightDivisor = 1.0; } else { sourceWeightDivisor = (double)bSource->bbWeight; } if (createDotFile) { fprintf(fgxFile, " " FMT_BB " -> " FMT_BB, bSource->bbNum, bTarget->bbNum); const char* sep = ""; if (blkMap[bSource->bbNum] > blkMap[bTarget->bbNum]) { // Lexical backedge fprintf(fgxFile, " [color=green"); sep = ", "; } else if ((blkMap[bSource->bbNum] + 1) == blkMap[bTarget->bbNum]) { // Lexical successor fprintf(fgxFile, " [color=blue, weight=20"); sep = ", "; } else { fprintf(fgxFile, " ["); } if (validWeights) { weight_t edgeWeight = (edge->edgeWeightMin() + edge->edgeWeightMax()) / 2; fprintf(fgxFile, "%slabel=\"%7.2f\"", sep, (double)edgeWeight / weightDivisor); } fprintf(fgxFile, "];\n"); } else { fprintf(fgxFile, "\n <edge"); fprintf(fgxFile, "\n id=\"%d\"", edgeNum); fprintf(fgxFile, "\n source=\"%d\"", bSource->bbNum); fprintf(fgxFile, "\n target=\"%d\"", bTarget->bbNum); if (bSource->bbJumpKind == BBJ_SWITCH) { if (edge->flDupCount >= 2) { fprintf(fgxFile, "\n switchCases=\"%d\"", edge->flDupCount); } if (bSource->bbJumpSwt->getDefault() == bTarget) { fprintf(fgxFile, "\n switchDefault=\"true\""); } } if (validWeights) { weight_t edgeWeight = (edge->edgeWeightMin() + edge->edgeWeightMax()) / 2; fprintf(fgxFile, "\n weight="); fprintfDouble(fgxFile, ((double)edgeWeight) / weightDivisor); if (edge->edgeWeightMin() != edge->edgeWeightMax()) { fprintf(fgxFile, "\n minWeight="); fprintfDouble(fgxFile, ((double)edge->edgeWeightMin()) / weightDivisor); fprintf(fgxFile, "\n maxWeight="); fprintfDouble(fgxFile, ((double)edge->edgeWeightMax()) / weightDivisor); } if (edgeWeight > 0) { if (edgeWeight < bSource->bbWeight) { fprintf(fgxFile, "\n out="); fprintfDouble(fgxFile, ((double)edgeWeight) / sourceWeightDivisor); } if (edgeWeight < bTarget->bbWeight) { fprintf(fgxFile, "\n in="); fprintfDouble(fgxFile, ((double)edgeWeight) / targetWeightDivisor); } } } } if (!createDotFile) { fprintf(fgxFile, ">"); fprintf(fgxFile, "\n </edge>"); } ++edgeNum; } } } // For dot, show edges w/o pred lists, and add invisible bbNext links. // Also, add EH and/or loop regions as "cluster" subgraphs, if requested. // if (createDotFile) { for (BasicBlock* const bSource : Blocks()) { if (constrained) { // Invisible edge for bbNext chain // if (bSource->bbNext != nullptr) { fprintf(fgxFile, " " FMT_BB " -> " FMT_BB " [style=\"invis\", weight=25];\n", bSource->bbNum, bSource->bbNext->bbNum); } } if (fgComputePredsDone) { // Already emitted pred edges above. 
// continue; } // Emit successor edges // for (BasicBlock* const bTarget : bSource->Succs()) { fprintf(fgxFile, " " FMT_BB " -> " FMT_BB, bSource->bbNum, bTarget->bbNum); if (blkMap[bSource->bbNum] > blkMap[bTarget->bbNum]) { // Lexical backedge fprintf(fgxFile, " [color=green]\n"); } else if ((blkMap[bSource->bbNum] + 1) == blkMap[bTarget->bbNum]) { // Lexical successor fprintf(fgxFile, " [color=blue]\n"); } else { fprintf(fgxFile, ";\n"); } } } if ((includeEH && (compHndBBtabCount > 0)) || (includeLoops && (optLoopCount > 0))) { // Generate something like: // subgraph cluster_0 { // label = "xxx"; // color = yyy; // bb; bb; // subgraph { // label = "aaa"; // color = bbb; // bb; bb... // } // ... // } // // Thus, the subgraphs need to be nested to show the region nesting. // // The EH table is in order, top-to-bottom, most nested to least nested where // there is a parent/child relationship. The loop table the opposite: it is // in order from the least nested to most nested. // // Build a region tree, collecting all the regions we want to display, // and then walk it to emit the regions. // RegionGraph: represent non-overlapping, possibly nested, block ranges in the flow graph. class RegionGraph { public: enum class RegionType { Root, EH, Loop }; private: struct Region { Region(RegionType rgnType, const char* rgnName, BasicBlock* bbStart, BasicBlock* bbEnd) : m_rgnNext(nullptr) , m_rgnChild(nullptr) , m_rgnType(rgnType) , m_bbStart(bbStart) , m_bbEnd(bbEnd) { strcpy_s(m_rgnName, sizeof(m_rgnName), rgnName); } Region* m_rgnNext; Region* m_rgnChild; RegionType m_rgnType; char m_rgnName[30]; BasicBlock* m_bbStart; BasicBlock* m_bbEnd; }; public: RegionGraph(Compiler* comp, unsigned* blkMap, unsigned blkMapSize) : m_comp(comp), m_rgnRoot(nullptr), m_blkMap(blkMap), m_blkMapSize(blkMapSize) { // Create a root region that encompasses the whole function. m_rgnRoot = new (m_comp, CMK_DebugOnly) Region(RegionType::Root, "Root", comp->fgFirstBB, comp->fgLastBB); } //------------------------------------------------------------------------ // Insert: Insert a region [start..end] (inclusive) into the graph. // // Arguments: // name - the textual label to use for the region // rgnType - the region type // start - start block of the region // end - last block of the region // void Insert(const char* name, RegionType rgnType, BasicBlock* start, BasicBlock* end) { JITDUMP("Insert region: %s, type: %s, start: " FMT_BB ", end: " FMT_BB "\n", name, GetRegionType(rgnType), start->bbNum, end->bbNum); assert(start != nullptr); assert(end != nullptr); Region* newRgn = new (m_comp, CMK_DebugOnly) Region(rgnType, name, start, end); unsigned newStartOrdinal = m_blkMap[start->bbNum]; unsigned newEndOrdinal = m_blkMap[end->bbNum]; Region* curRgn = m_rgnRoot; unsigned curStartOrdinal = m_blkMap[curRgn->m_bbStart->bbNum]; unsigned curEndOrdinal = m_blkMap[curRgn->m_bbEnd->bbNum]; // A range can be a single block, but there can be no overlap between ranges. assert(newStartOrdinal <= newEndOrdinal); assert(curStartOrdinal <= curEndOrdinal); assert(newStartOrdinal >= curStartOrdinal); assert(newEndOrdinal <= curEndOrdinal); // We know the new region will be part of the current region. Should it be a direct // child, or put within one of the existing children? 
Region** lastChildPtr = &curRgn->m_rgnChild; Region* child = curRgn->m_rgnChild; while (child != nullptr) { unsigned childStartOrdinal = m_blkMap[child->m_bbStart->bbNum]; unsigned childEndOrdinal = m_blkMap[child->m_bbEnd->bbNum]; // Consider the following cases, where each "x" is a block in the range: // xxxxxxx // current 'child' range; we're comparing against this // xxxxxxx // (1) same range; could be considered child or parent // xxxxxxxxx // (2) parent range, shares last block // xxxxxxxxx // (3) parent range, shares first block // xxxxxxxxxxx // (4) fully overlapping parent range // xx // (5) non-overlapping preceding sibling range // xx // (6) non-overlapping following sibling range // xxx // (7) child range // xxx // (8) child range, shares same start block // x // (9) single-block child range, shares same start block // xxx // (10) child range, shares same end block // x // (11) single-block child range, shares same end block // xxxxxxx // illegal: overlapping ranges // xxx // illegal: overlapping ranges (shared child start block and new end block) // xxxxxxx // illegal: overlapping ranges // xxx // illegal: overlapping ranges (shared child end block and new start block) // Assert the child is properly nested within the parent. // Note that if regions have the same start and end, you can't tell which is nested within the // other, though it shouldn't matter. assert(childStartOrdinal <= childEndOrdinal); assert(curStartOrdinal <= childStartOrdinal); assert(childEndOrdinal <= curEndOrdinal); // Should the new region be before this child? // Case (5). if (newEndOrdinal < childStartOrdinal) { // Insert before this child. newRgn->m_rgnNext = child; *lastChildPtr = newRgn; break; } else if ((newStartOrdinal >= childStartOrdinal) && (newEndOrdinal <= childEndOrdinal)) { // Insert as a child of this child. // Need to recurse to walk the child's children list to see where it belongs. // Case (1), (7), (8), (9), (10), (11). curStartOrdinal = m_blkMap[child->m_bbStart->bbNum]; curEndOrdinal = m_blkMap[child->m_bbEnd->bbNum]; lastChildPtr = &child->m_rgnChild; child = child->m_rgnChild; continue; } else if (newStartOrdinal <= childStartOrdinal) { // The new region is a parent of one or more of the existing children. // Case (2), (3), (4). // Find all the children it encompasses. Region** lastEndChildPtr = &child->m_rgnNext; Region* endChild = child->m_rgnNext; while (endChild != nullptr) { unsigned endChildStartOrdinal = m_blkMap[endChild->m_bbStart->bbNum]; unsigned endChildEndOrdinal = m_blkMap[endChild->m_bbEnd->bbNum]; assert(endChildStartOrdinal <= endChildEndOrdinal); if (newEndOrdinal < endChildStartOrdinal) { // Found the range break; } lastEndChildPtr = &endChild->m_rgnNext; endChild = endChild->m_rgnNext; } // The range is [child..endChild previous]. If endChild is nullptr, then // the range is to the end of the parent. Move these all to be // children of newRgn, and put newRgn in where `child` is. newRgn->m_rgnNext = endChild; *lastChildPtr = newRgn; newRgn->m_rgnChild = child; *lastEndChildPtr = nullptr; break; } // Else, look for next child. // Case (6). lastChildPtr = &child->m_rgnNext; child = child->m_rgnNext; } if (child == nullptr) { // Insert as the last child (could be the only child). *lastChildPtr = newRgn; } } #ifdef DEBUG const unsigned dumpIndentIncrement = 2; // How much to indent each nested level. //------------------------------------------------------------------------ // GetRegionType: get a textual name for the region type, to be used in dumps. 
// // Arguments: // rgnType - the region type // static const char* GetRegionType(RegionType rgnType) { switch (rgnType) { case RegionType::Root: return "Root"; case RegionType::EH: return "EH"; case RegionType::Loop: return "Loop"; default: return "UNKNOWN"; } } //------------------------------------------------------------------------ // DumpRegionNode: Region graph dump helper to dump a region node at the given indent, // and recursive dump its children. // // Arguments: // rgn - the region to dump // indent - number of leading characters to indent all output // void DumpRegionNode(Region* rgn, unsigned indent) const { printf("%*s======\n", indent, ""); printf("%*sType: %s\n", indent, "", GetRegionType(rgn->m_rgnType)); printf("%*sName: %s\n", indent, "", rgn->m_rgnName); printf("%*sRange: " FMT_BB ".." FMT_BB "\n", indent, "", rgn->m_bbStart->bbNum, rgn->m_bbEnd->bbNum); for (Region* child = rgn->m_rgnChild; child != nullptr; child = child->m_rgnNext) { DumpRegionNode(child, indent + dumpIndentIncrement); } } //------------------------------------------------------------------------ // Dump: dump the entire region graph // void Dump() { printf("Region graph:\n"); DumpRegionNode(m_rgnRoot, 0); printf("\n"); } //------------------------------------------------------------------------ // VerifyNode: verify the region graph rooted at `rgn`. // // Arguments: // rgn - the node (and its children) to check. // void Verify(Region* rgn) { // The region needs to be a non-overlapping parent to all its children. // The children need to be non-overlapping, and in increasing order. unsigned rgnStartOrdinal = m_blkMap[rgn->m_bbStart->bbNum]; unsigned rgnEndOrdinal = m_blkMap[rgn->m_bbEnd->bbNum]; assert(rgnStartOrdinal <= rgnEndOrdinal); Region* child = rgn->m_rgnChild; Region* lastChild = nullptr; if (child != nullptr) { unsigned childStartOrdinal = m_blkMap[child->m_bbStart->bbNum]; unsigned childEndOrdinal = m_blkMap[child->m_bbEnd->bbNum]; assert(childStartOrdinal <= childEndOrdinal); assert(rgnStartOrdinal <= childStartOrdinal); while (true) { Verify(child); lastChild = child; unsigned lastChildStartOrdinal = childStartOrdinal; unsigned lastChildEndOrdinal = childEndOrdinal; child = child->m_rgnNext; if (child == nullptr) { break; } childStartOrdinal = m_blkMap[child->m_bbStart->bbNum]; childEndOrdinal = m_blkMap[child->m_bbEnd->bbNum]; assert(childStartOrdinal <= childEndOrdinal); // The children can't overlap; they can't share any blocks. assert(lastChildEndOrdinal < childStartOrdinal); } // The parent region must fully include the last child. assert(childEndOrdinal <= rgnEndOrdinal); } } //------------------------------------------------------------------------ // Verify: verify the region graph satisfies proper nesting, and other legality rules. // void Verify() { assert(m_comp != nullptr); assert(m_blkMap != nullptr); for (unsigned i = 0; i < m_blkMapSize; i++) { assert(m_blkMap[i] < m_blkMapSize); } // The root region has no siblings. assert(m_rgnRoot != nullptr); assert(m_rgnRoot->m_rgnNext == nullptr); Verify(m_rgnRoot); } #endif // DEBUG //------------------------------------------------------------------------ // Output: output the region graph to the .dot file // // Arguments: // file - the file to write output to. // void Output(FILE* file) { unsigned clusterNum = 0; // Output the regions; don't output the top (root) region that represents the whole function. 
for (Region* child = m_rgnRoot->m_rgnChild; child != nullptr; child = child->m_rgnNext) { OutputRegion(file, clusterNum, child, 4); } fprintf(file, "\n"); } private: //------------------------------------------------------------------------ // GetColorForRegion: get a color name to use for a region // // Arguments: // rgn - the region for which we need a color // static const char* GetColorForRegion(Region* rgn) { RegionType rgnType = rgn->m_rgnType; switch (rgnType) { case RegionType::EH: return "red"; case RegionType::Loop: return "blue"; default: return "black"; } } //------------------------------------------------------------------------ // OutputRegion: helper function to output a region and its nested children // to the .dot file. // // Arguments: // file - the file to write output to. // clusterNum - the number of this dot "cluster". This is updated as we // create new clusters. // rgn - the region to output. // indent - the current indent level, in characters. // void OutputRegion(FILE* file, unsigned& clusterNum, Region* rgn, unsigned indent) { fprintf(file, "%*ssubgraph cluster_%u {\n", indent, "", clusterNum); indent += 4; fprintf(file, "%*slabel = \"%s\";\n", indent, "", rgn->m_rgnName); fprintf(file, "%*scolor = %s;\n", indent, "", GetColorForRegion(rgn)); clusterNum++; bool needIndent = true; BasicBlock* bbCur = rgn->m_bbStart; BasicBlock* bbEnd = rgn->m_bbEnd->bbNext; Region* child = rgn->m_rgnChild; BasicBlock* childCurBB = (child == nullptr) ? nullptr : child->m_bbStart; // Count the children and assert we output all of them. unsigned totalChildren = 0; unsigned childCount = 0; for (Region* tmpChild = child; tmpChild != nullptr; tmpChild = tmpChild->m_rgnNext) { totalChildren++; } while (bbCur != bbEnd) { // Output from bbCur to current child first block. while ((bbCur != childCurBB) && (bbCur != bbEnd)) { fprintf(file, "%*s" FMT_BB ";", needIndent ? indent : 0, "", bbCur->bbNum); needIndent = false; bbCur = bbCur->bbNext; } if (bbCur == bbEnd) { // We're done at this level. break; } else { assert(bbCur != nullptr); // Or else we should also have `bbCur == bbEnd` assert(child != nullptr); // If there is a child, output that child. if (!needIndent) { // We've printed some basic blocks, so put the subgraph on a new line. fprintf(file, "\n"); } OutputRegion(file, clusterNum, child, indent); needIndent = true; childCount++; bbCur = child->m_bbEnd->bbNext; // Next, output blocks after this child. child = child->m_rgnNext; // Move to the next child, if any. childCurBB = (child == nullptr) ? nullptr : child->m_bbStart; } } // Put the end brace on its own line and leave the cursor at the beginning of the line for the // parent. indent -= 4; fprintf(file, "\n%*s}\n", indent, ""); assert(childCount == totalChildren); } Compiler* m_comp; Region* m_rgnRoot; unsigned* m_blkMap; unsigned m_blkMapSize; }; // Define the region graph object. We'll add regions to this, then output the graph. RegionGraph rgnGraph(this, blkMap, blkMapSize); // Add the EH regions to the region graph. An EH region consists of a region for the // `try`, a region for the handler, and, for filter/filter-handlers, a region for the // `filter` as well. 
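    // For example, a single filter clause (say EH#0) contributes three regions below:
    // "EH#0 try", "EH#0 filter", and "EH#0 filter-hnd", each spanning its block range and
    // nested within the region graph according to those ranges.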
if (includeEH) { char name[30]; unsigned XTnum; EHblkDsc* ehDsc; for (XTnum = 0, ehDsc = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, ehDsc++) { sprintf_s(name, sizeof(name), "EH#%u try", XTnum); rgnGraph.Insert(name, RegionGraph::RegionType::EH, ehDsc->ebdTryBeg, ehDsc->ebdTryLast); const char* handlerType = ""; switch (ehDsc->ebdHandlerType) { case EH_HANDLER_CATCH: handlerType = "catch"; break; case EH_HANDLER_FILTER: handlerType = "filter-hnd"; break; case EH_HANDLER_FAULT: handlerType = "fault"; break; case EH_HANDLER_FINALLY: handlerType = "finally"; break; case EH_HANDLER_FAULT_WAS_FINALLY: handlerType = "fault-was-finally"; break; } sprintf_s(name, sizeof(name), "EH#%u %s", XTnum, handlerType); rgnGraph.Insert(name, RegionGraph::RegionType::EH, ehDsc->ebdHndBeg, ehDsc->ebdHndLast); if (ehDsc->HasFilter()) { sprintf_s(name, sizeof(name), "EH#%u filter", XTnum); rgnGraph.Insert(name, RegionGraph::RegionType::EH, ehDsc->ebdFilter, ehDsc->ebdHndBeg->bbPrev); } } } // Add regions for the loops. Note that loops are assumed to be contiguous from `lpTop` to `lpBottom`. if (includeLoops) { #ifdef DEBUG const bool displayLoopFlags = JitConfig.JitDumpFgLoopFlags() != 0; #else // !DEBUG const bool displayLoopFlags = false; #endif // !DEBUG char name[30]; for (unsigned loopNum = 0; loopNum < optLoopCount; loopNum++) { const LoopDsc& loop = optLoopTable[loopNum]; if (loop.lpFlags & LPFLG_REMOVED) { continue; } sprintf_s(name, sizeof(name), FMT_LP, loopNum); if (displayLoopFlags) { // Display a very few, useful, loop flags strcat_s(name, sizeof(name), " ["); if (loop.lpFlags & LoopFlags::LPFLG_ITER) { strcat_s(name, sizeof(name), "I"); } if (loop.lpFlags & LoopFlags::LPFLG_HAS_PREHEAD) { strcat_s(name, sizeof(name), "P"); } strcat_s(name, sizeof(name), "]"); } rgnGraph.Insert(name, RegionGraph::RegionType::Loop, loop.lpTop, loop.lpBottom); } } // All the regions have been added. Now, output them. 
DBEXEC(verbose, rgnGraph.Dump()); INDEBUG(rgnGraph.Verify()); rgnGraph.Output(fgxFile); } } if (createDotFile) { fprintf(fgxFile, "}\n"); } else { fprintf(fgxFile, "\n </edges>"); fprintf(fgxFile, "\n</method>\n"); } if (dontClose) { // fgxFile is jitstdout or stderr fprintf(fgxFile, "\n"); } else { fclose(fgxFile); } return result; } #endif // DUMP_FLOWGRAPHS /*****************************************************************************/ #ifdef DEBUG void Compiler::fgDispReach() { printf("------------------------------------------------\n"); printf("BBnum Reachable by \n"); printf("------------------------------------------------\n"); for (BasicBlock* const block : Blocks()) { printf(FMT_BB " : ", block->bbNum); BlockSetOps::Iter iter(this, block->bbReach); unsigned bbNum = 0; while (iter.NextElem(&bbNum)) { printf(FMT_BB " ", bbNum); } printf("\n"); } } void Compiler::fgDispDoms() { // Don't bother printing this when we have a large number of BasicBlocks in the method if (fgBBcount > 256) { return; } printf("------------------------------------------------\n"); printf("BBnum Dominated by\n"); printf("------------------------------------------------\n"); for (unsigned i = 1; i <= fgBBNumMax; ++i) { BasicBlock* current = fgBBInvPostOrder[i]; printf(FMT_BB ": ", current->bbNum); while (current != current->bbIDom) { printf(FMT_BB " ", current->bbNum); current = current->bbIDom; } printf("\n"); } } /*****************************************************************************/ void Compiler::fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth /* = 0 */) { const unsigned __int64 flags = block->bbFlags; unsigned bbNumMax = impInlineRoot()->fgBBNumMax; int maxBlockNumWidth = CountDigits(bbNumMax); maxBlockNumWidth = max(maxBlockNumWidth, 2); int blockNumWidth = CountDigits(block->bbNum); blockNumWidth = max(blockNumWidth, 2); int blockNumPadding = maxBlockNumWidth - blockNumWidth; printf("%s %2u", block->dspToString(blockNumPadding), block->bbRefs); // // Display EH 'try' region index // if (block->hasTryIndex()) { printf(" %2u", block->getTryIndex()); } else { printf(" "); } // // Display EH handler region index // if (block->hasHndIndex()) { printf(" %2u", block->getHndIndex()); } else { printf(" "); } printf(" "); // // Display block predecessor list // unsigned charCnt; if (fgCheapPredsValid) { charCnt = block->dspCheapPreds(); } else { charCnt = block->dspPreds(); } if (charCnt < 19) { printf("%*s", 19 - charCnt, ""); } printf(" "); // // Display block weight // if (block->isMaxBBWeight()) { printf(" MAX "); } else { weight_t weight = block->getBBWeight(this); if (weight > 99999) // Is it going to be more than 6 characters? { if (weight <= 99999 * BB_UNITY_WEIGHT) { // print weight in this format ddddd. printf("%5u.", (unsigned)FloatingPointUtils::round(weight / BB_UNITY_WEIGHT)); } else // print weight in terms of k (i.e. 156k ) { // print weight in this format dddddk weight_t weightK = weight / 1000; printf("%5uk", (unsigned)FloatingPointUtils::round(weightK / BB_UNITY_WEIGHT)); } } else // print weight in this format ddd.dd { printf("%6s", refCntWtd2str(weight)); } } // // Display optional IBC weight column. // Note that iColWidth includes one character for a leading space, if there is an IBC column. // if (ibcColWidth > 0) { if (block->hasProfileWeight()) { printf("%*u", ibcColWidth, (unsigned)FloatingPointUtils::round(block->bbWeight)); } else { // No IBC data. Just print spaces to align the column. 
printf("%*s", ibcColWidth, ""); } } printf(" "); // // Display natural loop number // if (block->bbNatLoopNum == BasicBlock::NOT_IN_LOOP) { printf(" "); } else { printf("%2d ", block->bbNatLoopNum); } // // Display block IL range // block->dspBlockILRange(); // // Display block branch target // if (flags & BBF_REMOVED) { printf("[removed] "); } else { switch (block->bbJumpKind) { case BBJ_COND: printf("-> " FMT_BB "%*s ( cond )", block->bbJumpDest->bbNum, maxBlockNumWidth - max(CountDigits(block->bbJumpDest->bbNum), 2), ""); break; case BBJ_CALLFINALLY: printf("-> " FMT_BB "%*s (callf )", block->bbJumpDest->bbNum, maxBlockNumWidth - max(CountDigits(block->bbJumpDest->bbNum), 2), ""); break; case BBJ_ALWAYS: if (flags & BBF_KEEP_BBJ_ALWAYS) { printf("-> " FMT_BB "%*s (ALWAYS)", block->bbJumpDest->bbNum, maxBlockNumWidth - max(CountDigits(block->bbJumpDest->bbNum), 2), ""); } else { printf("-> " FMT_BB "%*s (always)", block->bbJumpDest->bbNum, maxBlockNumWidth - max(CountDigits(block->bbJumpDest->bbNum), 2), ""); } break; case BBJ_LEAVE: printf("-> " FMT_BB "%*s (leave )", block->bbJumpDest->bbNum, maxBlockNumWidth - max(CountDigits(block->bbJumpDest->bbNum), 2), ""); break; case BBJ_EHFINALLYRET: printf("%*s (finret)", maxBlockNumWidth - 2, ""); break; case BBJ_EHFILTERRET: printf("%*s (fltret)", maxBlockNumWidth - 2, ""); break; case BBJ_EHCATCHRET: printf("-> " FMT_BB "%*s ( cret )", block->bbJumpDest->bbNum, maxBlockNumWidth - max(CountDigits(block->bbJumpDest->bbNum), 2), ""); break; case BBJ_THROW: printf("%*s (throw )", maxBlockNumWidth - 2, ""); break; case BBJ_RETURN: printf("%*s (return)", maxBlockNumWidth - 2, ""); break; default: printf("%*s ", maxBlockNumWidth - 2, ""); break; case BBJ_SWITCH: { printf("->"); const BBswtDesc* const bbJumpSwt = block->bbJumpSwt; const unsigned jumpCnt = bbJumpSwt->bbsCount; BasicBlock** const jumpTab = bbJumpSwt->bbsDstTab; int switchWidth = 0; for (unsigned i = 0; i < jumpCnt; i++) { printf("%c" FMT_BB, (i == 0) ? 
' ' : ',', jumpTab[i]->bbNum); switchWidth += 1 /* space/comma */ + 2 /* BB */ + max(CountDigits(jumpTab[i]->bbNum), 2); const bool isDefault = bbJumpSwt->bbsHasDefault && (i == jumpCnt - 1); if (isDefault) { printf("[def]"); switchWidth += 5; } const bool isDominant = bbJumpSwt->bbsHasDominantCase && (i == bbJumpSwt->bbsDominantCase); if (isDominant) { printf("[dom(" FMT_WT ")]", bbJumpSwt->bbsDominantFraction); switchWidth += 10; } } if (switchWidth < 7) { printf("%*s", 8 - switchWidth, ""); } printf(" (switch)"); } break; } } printf(" "); // // Display block EH region and type, including nesting indicator // if (block->hasTryIndex()) { printf("T%d ", block->getTryIndex()); } else { printf(" "); } if (block->hasHndIndex()) { printf("H%d ", block->getHndIndex()); } else { printf(" "); } if (flags & BBF_FUNCLET_BEG) { printf("F "); } else { printf(" "); } int cnt = 0; switch (block->bbCatchTyp) { case BBCT_NONE: break; case BBCT_FAULT: printf("fault "); cnt += 6; break; case BBCT_FINALLY: printf("finally "); cnt += 8; break; case BBCT_FILTER: printf("filter "); cnt += 7; break; case BBCT_FILTER_HANDLER: printf("filtHnd "); cnt += 8; break; default: printf("catch "); cnt += 6; break; } if (block->bbCatchTyp != BBCT_NONE) { cnt += 2; printf("{ "); /* brace matching editor workaround to compensate for the preceding line: } */ } if (flags & BBF_TRY_BEG) { // Output a brace for every try region that this block opens for (EHblkDsc* const HBtab : EHClauses(this)) { if (HBtab->ebdTryBeg == block) { cnt += 6; printf("try { "); /* brace matching editor workaround to compensate for the preceding line: } */ } } } for (EHblkDsc* const HBtab : EHClauses(this)) { if (HBtab->ebdTryLast == block) { cnt += 2; /* brace matching editor workaround to compensate for the following line: { */ printf("} "); } if (HBtab->ebdHndLast == block) { cnt += 2; /* brace matching editor workaround to compensate for the following line: { */ printf("} "); } if (HBtab->HasFilter() && block->bbNext == HBtab->ebdHndBeg) { cnt += 2; /* brace matching editor workaround to compensate for the following line: { */ printf("} "); } } while (cnt < 12) { cnt++; printf(" "); } // // Display block flags // block->dspFlags(); // Display OSR info // if (opts.IsOSR()) { if (block == fgEntryBB) { printf(" original-entry"); } if (block == fgOSREntryBB) { printf(" osr-entry"); } } printf("\n"); } /**************************************************************************** Dump blocks from firstBlock to lastBlock. */ void Compiler::fgDispBasicBlocks(BasicBlock* firstBlock, BasicBlock* lastBlock, bool dumpTrees) { BasicBlock* block; // If any block has IBC data, we add an "IBC weight" column just before the 'IL range' column. This column is as // wide as necessary to accommodate all the various IBC weights. It's at least 4 characters wide, to accommodate // the "IBC" title and leading space. int ibcColWidth = 0; for (block = firstBlock; block != nullptr; block = block->bbNext) { if (block->hasProfileWeight()) { int thisIbcWidth = CountDigits(block->bbWeight); ibcColWidth = max(ibcColWidth, thisIbcWidth); } if (block == lastBlock) { break; } } if (ibcColWidth > 0) { ibcColWidth = max(ibcColWidth, 3) + 1; // + 1 for the leading space } unsigned bbNumMax = impInlineRoot()->fgBBNumMax; int maxBlockNumWidth = CountDigits(bbNumMax); maxBlockNumWidth = max(maxBlockNumWidth, 2); int padWidth = maxBlockNumWidth - 2; // Account for functions with a large number of blocks. 
// clang-format off printf("\n"); printf("------%*s-------------------------------------%*s--------------------------%*s----------------------------------------\n", padWidth, "------------", ibcColWidth, "------------", maxBlockNumWidth, "----"); printf("BBnum %*sBBid ref try hnd %s weight %*s%s lp [IL range] [jump]%*s [EH region] [flags]\n", padWidth, "", fgCheapPredsValid ? "cheap preds" : (fgComputePredsDone ? "preds " : " "), ((ibcColWidth > 0) ? ibcColWidth - 3 : 0), "", // Subtract 3 for the width of "IBC", printed next. ((ibcColWidth > 0) ? "IBC" : ""), maxBlockNumWidth, "" ); printf("------%*s-------------------------------------%*s--------------------------%*s----------------------------------------\n", padWidth, "------------", ibcColWidth, "------------", maxBlockNumWidth, "----"); // clang-format on for (block = firstBlock; block; block = block->bbNext) { // First, do some checking on the bbPrev links if (block->bbPrev) { if (block->bbPrev->bbNext != block) { printf("bad prev link\n"); } } else if (block != fgFirstBB) { printf("bad prev link!\n"); } if (block == fgFirstColdBlock) { printf( "~~~~~~%*s~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%*s~~~~~~~~~~~~~~~~~~~~~~~~~~%*s~~~~~~~~~~~~~~~~~~~~~~~~" "~~~~~~~~~~~~~~~~\n", padWidth, "~~~~~~~~~~~~", ibcColWidth, "~~~~~~~~~~~~", maxBlockNumWidth, "~~~~"); } #if defined(FEATURE_EH_FUNCLETS) if (block == fgFirstFuncletBB) { printf( "++++++%*s+++++++++++++++++++++++++++++++++++++%*s++++++++++++++++++++++++++%*s++++++++++++++++++++++++" "++++++++++++++++ funclets follow\n", padWidth, "++++++++++++", ibcColWidth, "++++++++++++", maxBlockNumWidth, "++++"); } #endif // FEATURE_EH_FUNCLETS fgTableDispBasicBlock(block, ibcColWidth); if (block == lastBlock) { break; } } printf( "------%*s-------------------------------------%*s--------------------------%*s--------------------------------" "--------\n", padWidth, "------------", ibcColWidth, "------------", maxBlockNumWidth, "----"); if (dumpTrees) { fgDumpTrees(firstBlock, lastBlock); } } /*****************************************************************************/ void Compiler::fgDispBasicBlocks(bool dumpTrees) { fgDispBasicBlocks(fgFirstBB, nullptr, dumpTrees); } //------------------------------------------------------------------------ // fgDumpStmtTree: dump the statement and the basic block number. // // Arguments: // stmt - the statement to dump; // bbNum - the basic block number to dump. // void Compiler::fgDumpStmtTree(Statement* stmt, unsigned bbNum) { printf("\n***** " FMT_BB "\n", bbNum); gtDispStmt(stmt); } //------------------------------------------------------------------------ // Compiler::fgDumpBlock: dumps the contents of the given block to stdout. // // Arguments: // block - The block to dump. // void Compiler::fgDumpBlock(BasicBlock* block) { printf("\n------------ "); block->dspBlockHeader(this); if (!block->IsLIR()) { for (Statement* const stmt : block->Statements()) { fgDumpStmtTree(stmt, block->bbNum); } } else { gtDispRange(LIR::AsRange(block)); } } //------------------------------------------------------------------------ // fgDumpTrees: dumps the trees for every block in a range of blocks. // // Arguments: // firstBlock - The first block to dump. // lastBlock - The last block to dump. // void Compiler::fgDumpTrees(BasicBlock* firstBlock, BasicBlock* lastBlock) { // Note that typically we have already called fgDispBasicBlocks() // so we don't need to print the preds and succs again here. 
for (BasicBlock* block = firstBlock; block != nullptr; block = block->bbNext) { fgDumpBlock(block); if (block == lastBlock) { break; } } printf("\n---------------------------------------------------------------------------------------------------------" "----------\n"); } /***************************************************************************** * Try to create as many candidates for GTF_MUL_64RSLT as possible. * We convert 'intOp1*intOp2' into 'int(long(nop(intOp1))*long(intOp2))'. */ /* static */ Compiler::fgWalkResult Compiler::fgStress64RsltMulCB(GenTree** pTree, fgWalkData* data) { GenTree* tree = *pTree; Compiler* pComp = data->compiler; if (tree->gtOper != GT_MUL || tree->gtType != TYP_INT || (tree->gtOverflow())) { return WALK_CONTINUE; } JITDUMP("STRESS_64RSLT_MUL before:\n"); DISPTREE(tree); // To ensure optNarrowTree() doesn't fold back to the original tree. tree->AsOp()->gtOp1 = pComp->gtNewCastNode(TYP_LONG, tree->AsOp()->gtOp1, false, TYP_LONG); tree->AsOp()->gtOp1 = pComp->gtNewOperNode(GT_NOP, TYP_LONG, tree->AsOp()->gtOp1); tree->AsOp()->gtOp1 = pComp->gtNewCastNode(TYP_LONG, tree->AsOp()->gtOp1, false, TYP_LONG); tree->AsOp()->gtOp2 = pComp->gtNewCastNode(TYP_LONG, tree->AsOp()->gtOp2, false, TYP_LONG); tree->gtType = TYP_LONG; *pTree = pComp->gtNewCastNode(TYP_INT, tree, false, TYP_INT); JITDUMP("STRESS_64RSLT_MUL after:\n"); DISPTREE(*pTree); return WALK_SKIP_SUBTREES; } void Compiler::fgStress64RsltMul() { if (!compStressCompile(STRESS_64RSLT_MUL, 20)) { return; } fgWalkAllTreesPre(fgStress64RsltMulCB, (void*)this); } // BBPredsChecker checks jumps from the block's predecessors to the block. class BBPredsChecker { public: BBPredsChecker(Compiler* compiler) : comp(compiler) { } unsigned CheckBBPreds(BasicBlock* block, unsigned curTraversalStamp); private: bool CheckEhTryDsc(BasicBlock* block, BasicBlock* blockPred, EHblkDsc* ehTryDsc); bool CheckEhHndDsc(BasicBlock* block, BasicBlock* blockPred, EHblkDsc* ehHndlDsc); bool CheckJump(BasicBlock* blockPred, BasicBlock* block); bool CheckEHFinallyRet(BasicBlock* blockPred, BasicBlock* block); private: Compiler* comp; }; //------------------------------------------------------------------------ // CheckBBPreds: Check basic block predecessors list. // // Notes: // This DEBUG routine checks that all predecessors have the correct traversal stamp // and have correct jumps to the block. // It calculates the number of incoming edges from the internal block, // i.e. it does not count the global incoming edge for the first block. // // Arguments: // block - the block to process; // curTraversalStamp - current traversal stamp to distinguish different iterations. // // Return value: // the number of incoming edges for the block. unsigned BBPredsChecker::CheckBBPreds(BasicBlock* block, unsigned curTraversalStamp) { if (comp->fgCheapPredsValid) { return 0; } if (!comp->fgComputePredsDone) { assert(block->bbPreds == nullptr); return 0; } unsigned blockRefs = 0; for (flowList* const pred : block->PredEdges()) { blockRefs += pred->flDupCount; BasicBlock* blockPred = pred->getBlock(); // Make sure this pred is part of the BB list. 
assert(blockPred->bbTraversalStamp == curTraversalStamp); EHblkDsc* ehTryDsc = comp->ehGetBlockTryDsc(block); if (ehTryDsc != nullptr) { assert(CheckEhTryDsc(block, blockPred, ehTryDsc)); } EHblkDsc* ehHndDsc = comp->ehGetBlockHndDsc(block); if (ehHndDsc != nullptr) { assert(CheckEhHndDsc(block, blockPred, ehHndDsc)); } assert(CheckJump(blockPred, block)); } // Make sure preds are in increasing BBnum order // assert(block->checkPredListOrder()); return blockRefs; } bool BBPredsChecker::CheckEhTryDsc(BasicBlock* block, BasicBlock* blockPred, EHblkDsc* ehTryDsc) { // You can jump to the start of a try if (ehTryDsc->ebdTryBeg == block) { return true; } // You can jump within the same try region if (comp->bbInTryRegions(block->getTryIndex(), blockPred)) { return true; } // The catch block can jump back into the middle of the try if (comp->bbInCatchHandlerRegions(block, blockPred)) { return true; } // The end of a finally region is a BBJ_EHFINALLYRET block (during importing, BBJ_LEAVE) which // is marked as "returning" to the BBJ_ALWAYS block following the BBJ_CALLFINALLY // block that does a local call to the finally. This BBJ_ALWAYS is within // the try region protected by the finally (for x86, ARM), but that's ok. BasicBlock* prevBlock = block->bbPrev; if (prevBlock->bbJumpKind == BBJ_CALLFINALLY && block->bbJumpKind == BBJ_ALWAYS && blockPred->bbJumpKind == BBJ_EHFINALLYRET) { return true; } printf("Jump into the middle of try region: " FMT_BB " branches to " FMT_BB "\n", blockPred->bbNum, block->bbNum); assert(!"Jump into middle of try region"); return false; } bool BBPredsChecker::CheckEhHndDsc(BasicBlock* block, BasicBlock* blockPred, EHblkDsc* ehHndlDsc) { // You can do a BBJ_EHFINALLYRET or BBJ_EHFILTERRET into a handler region if (blockPred->KindIs(BBJ_EHFINALLYRET, BBJ_EHFILTERRET)) { return true; } // Our try block can call our finally block if ((block->bbCatchTyp == BBCT_FINALLY) && (blockPred->bbJumpKind == BBJ_CALLFINALLY) && comp->ehCallFinallyInCorrectRegion(blockPred, block->getHndIndex())) { return true; } // You can jump within the same handler region if (comp->bbInHandlerRegions(block->getHndIndex(), blockPred)) { return true; } // A filter can jump to the start of the filter handler if (ehHndlDsc->HasFilter()) { return true; } printf("Jump into the middle of handler region: " FMT_BB " branches to " FMT_BB "\n", blockPred->bbNum, block->bbNum); assert(!"Jump into the middle of handler region"); return false; } bool BBPredsChecker::CheckJump(BasicBlock* blockPred, BasicBlock* block) { switch (blockPred->bbJumpKind) { case BBJ_COND: assert(blockPred->bbNext == block || blockPred->bbJumpDest == block); return true; case BBJ_NONE: assert(blockPred->bbNext == block); return true; case BBJ_CALLFINALLY: case BBJ_ALWAYS: case BBJ_EHCATCHRET: case BBJ_EHFILTERRET: assert(blockPred->bbJumpDest == block); return true; case BBJ_EHFINALLYRET: assert(CheckEHFinallyRet(blockPred, block)); return true; case BBJ_THROW: case BBJ_RETURN: assert(!"THROW and RETURN block cannot be in the predecessor list!"); break; case BBJ_SWITCH: for (BasicBlock* const bTarget : blockPred->SwitchTargets()) { if (block == bTarget) { return true; } } assert(!"SWITCH in the predecessor list with no jump label to BLOCK!"); break; default: assert(!"Unexpected bbJumpKind"); break; } return false; } bool BBPredsChecker::CheckEHFinallyRet(BasicBlock* blockPred, BasicBlock* block) { // If the current block is a successor to a BBJ_EHFINALLYRET (return from finally), // then the lexically previous block should be a call 
to the same finally. // Verify all of that. unsigned hndIndex = blockPred->getHndIndex(); EHblkDsc* ehDsc = comp->ehGetDsc(hndIndex); BasicBlock* finBeg = ehDsc->ebdHndBeg; // Because there is no bbPrev, we have to search for the lexically previous // block. We can shorten the search by only looking in places where it is legal // to have a call to the finally. BasicBlock* begBlk; BasicBlock* endBlk; comp->ehGetCallFinallyBlockRange(hndIndex, &begBlk, &endBlk); for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) { continue; } if (block == bcall->bbNext) { return true; } } #if defined(FEATURE_EH_FUNCLETS) if (comp->fgFuncletsCreated) { // There is no easy way to search just the funclets that were pulled out of // the corresponding try body, so instead we search all the funclets, and if // we find a potential 'hit' we check if the funclet we're looking at is // from the correct try region. for (BasicBlock* const bcall : comp->Blocks(comp->fgFirstFuncletBB)) { if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) { continue; } if (block != bcall->bbNext) { continue; } if (comp->ehCallFinallyInCorrectRegion(bcall, hndIndex)) { return true; } } } #endif // FEATURE_EH_FUNCLETS assert(!"BBJ_EHFINALLYRET predecessor of block that doesn't follow a BBJ_CALLFINALLY!"); return false; } //------------------------------------------------------------------------------ // fgDebugCheckBBNumIncreasing: Check that the block list bbNum are in increasing order in the bbNext // traversal. Given a block B1 and its bbNext successor B2, this means `B1->bbNum < B2->bbNum`, but not // that `B1->bbNum + 1 == B2->bbNum` (which is true after renumbering). This can be used as a precondition // to a phase that expects this ordering to compare block numbers (say, to look for backwards branches) // and doesn't want to call fgRenumberBlocks(), to avoid that potential expense. // void Compiler::fgDebugCheckBBNumIncreasing() { for (BasicBlock* const block : Blocks()) { assert(block->bbNext == nullptr || (block->bbNum < block->bbNext->bbNum)); } } // This variable is used to generate "traversal labels": one-time constants with which // we label basic blocks that are members of the basic block list, in order to have a // fast, high-probability test for membership in that list. Type is "volatile" because // it's incremented with an atomic operation, which wants a volatile type; "long" so that // wrap-around to 0 (which I think has the highest probability of accidental collision) is // postponed a *long* time. static volatile int bbTraverseLabel = 1; /***************************************************************************** * * A DEBUG routine to check the consistency of the flowgraph, * i.e. bbNum, bbRefs, bbPreds have to be up to date. * *****************************************************************************/ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRefs /* = true */) { #ifdef DEBUG if (verbose) { printf("*************** In fgDebugCheckBBlist\n"); } #endif // DEBUG fgDebugCheckBlockLinks(); fgFirstBBisScratch(); if (fgBBcount > 10000 && expensiveDebugCheckLevel < 1) { // The basic block checks are too expensive if there are too many blocks, // so give up unless we've been told to try hard. return; } #if defined(FEATURE_EH_FUNCLETS) bool reachedFirstFunclet = false; if (fgFuncletsCreated) { // // Make sure that fgFirstFuncletBB is accurate. 
// It should be the first basic block in a handler region. // if (fgFirstFuncletBB != nullptr) { assert(fgFirstFuncletBB->hasHndIndex() == true); assert(fgFirstFuncletBB->bbFlags & BBF_FUNCLET_BEG); } } #endif // FEATURE_EH_FUNCLETS /* Check bbNum, bbRefs and bbPreds */ // First, pick a traversal stamp, and label all the blocks with it. unsigned curTraversalStamp = unsigned(InterlockedIncrement((LONG*)&bbTraverseLabel)); for (BasicBlock* const block : Blocks()) { block->bbTraversalStamp = curTraversalStamp; } for (BasicBlock* const block : Blocks()) { if (checkBBNum) { // Check that bbNum is sequential assert(block->bbNext == nullptr || (block->bbNum + 1 == block->bbNext->bbNum)); } // If the block is a BBJ_COND, a BBJ_SWITCH or a // lowered GT_SWITCH_TABLE node then make sure it // ends with a conditional jump or a GT_SWITCH if (block->bbJumpKind == BBJ_COND) { assert(block->lastNode()->gtNext == nullptr && block->lastNode()->OperIsConditionalJump()); } else if (block->bbJumpKind == BBJ_SWITCH) { assert(block->lastNode()->gtNext == nullptr && (block->lastNode()->gtOper == GT_SWITCH || block->lastNode()->gtOper == GT_SWITCH_TABLE)); } if (block->bbCatchTyp == BBCT_FILTER) { if (!fgCheapPredsValid) // Don't check cheap preds { // A filter has no predecessors assert(block->bbPreds == nullptr); } } #if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { // // There should be no handler blocks until // we get to the fgFirstFuncletBB block, // then every block should be a handler block // if (!reachedFirstFunclet) { if (block == fgFirstFuncletBB) { assert(block->hasHndIndex() == true); reachedFirstFunclet = true; } else { assert(block->hasHndIndex() == false); } } else // reachedFirstFunclet { assert(block->hasHndIndex() == true); } } #endif // FEATURE_EH_FUNCLETS if (checkBBRefs) { assert(fgComputePredsDone); } BBPredsChecker checker(this); unsigned blockRefs = checker.CheckBBPreds(block, curTraversalStamp); // First basic block has an additional global incoming edge. if (block == fgFirstBB) { blockRefs += 1; } // Under OSR, if we also are keeping the original method entry around, // mark that as implicitly referenced as well. if (opts.IsOSR() && (block == fgEntryBB)) { blockRefs += 1; } /* Check the bbRefs */ if (checkBBRefs) { if (block->bbRefs != blockRefs) { // Check to see if this block is the beginning of a filter or a handler and adjust the ref count // appropriately. for (EHblkDsc* const HBtab : EHClauses(this)) { if (HBtab->ebdHndBeg == block) { blockRefs++; } if (HBtab->HasFilter() && (HBtab->ebdFilter == block)) { blockRefs++; } } } assert(block->bbRefs == blockRefs); } /* Check that BBF_HAS_HANDLER is valid bbTryIndex */ if (block->hasTryIndex()) { assert(block->getTryIndex() < compHndBBtabCount); } // A branch or fall-through to a BBJ_CALLFINALLY block must come from the `try` region associated // with the finally block the BBJ_CALLFINALLY is targeting. There is one special case: if the // BBJ_CALLFINALLY is the first block of a `try`, then its predecessor can be outside the `try`: // either a branch or fall-through to the first block. // // Note that this IR condition is a choice. It naturally occurs when importing EH constructs. // This condition prevents flow optimizations from skipping blocks in a `try` and branching // directly to the BBJ_CALLFINALLY. Relaxing this constraint would require careful thinking about // the implications, such as data flow optimizations. // // Don't depend on predecessors list for the check. 
for (BasicBlock* const succBlock : block->Succs()) { if (succBlock->bbJumpKind == BBJ_CALLFINALLY) { BasicBlock* finallyBlock = succBlock->bbJumpDest; assert(finallyBlock->hasHndIndex()); unsigned finallyIndex = finallyBlock->getHndIndex(); // Now make sure the block branching to the BBJ_CALLFINALLY is in the correct region. The branch // to the BBJ_CALLFINALLY can come from the try region of the finally block, or from a more nested // try region, e.g.: // try { // try { // LEAVE L_OUTER; // this becomes a branch to a BBJ_CALLFINALLY in an outer try region // // (in the FEATURE_EH_CALLFINALLY_THUNKS case) // } catch { // } // } finally { // } // L_OUTER: // EHblkDsc* ehDsc = ehGetDsc(finallyIndex); if (ehDsc->ebdTryBeg == succBlock) { // The BBJ_CALLFINALLY is the first block of it's `try` region. Don't check the predecessor. // Note that this case won't occur in the FEATURE_EH_CALLFINALLY_THUNKS case, since the // BBJ_CALLFINALLY in that case won't exist in the `try` region of the `finallyIndex`. } else { assert(bbInTryRegions(finallyIndex, block)); } } } /* Check if BBF_RUN_RARELY is set that we have bbWeight of zero */ if (block->isRunRarely()) { assert(block->bbWeight == BB_ZERO_WEIGHT); } else { assert(block->bbWeight > BB_ZERO_WEIGHT); } } // Make sure the one return BB is not changed. if (genReturnBB != nullptr) { assert(genReturnBB->GetFirstLIRNode() != nullptr || genReturnBB->bbStmtList != nullptr); } // The general encoder/decoder (currently) only reports "this" as a generics context as a stack location, // so we mark info.compThisArg as lvAddrTaken to ensure that it is not enregistered. Otherwise, it should // not be address-taken. This variable determines if the address-taken-ness of "thisArg" is "OK". bool copiedForGenericsCtxt; #ifndef JIT32_GCENCODER copiedForGenericsCtxt = ((info.compMethodInfo->options & CORINFO_GENERICS_CTXT_FROM_THIS) != 0); #else // JIT32_GCENCODER copiedForGenericsCtxt = false; #endif // JIT32_GCENCODER // This if only in support of the noway_asserts it contains. if (info.compIsStatic) { // For static method, should have never grabbed the temp. assert(lvaArg0Var == BAD_VAR_NUM); } else { // For instance method: assert(info.compThisArg != BAD_VAR_NUM); bool compThisArgAddrExposedOK = !lvaTable[info.compThisArg].IsAddressExposed(); #ifndef JIT32_GCENCODER compThisArgAddrExposedOK = compThisArgAddrExposedOK || copiedForGenericsCtxt; #endif // !JIT32_GCENCODER // Should never expose the address of arg 0 or write to arg 0. // In addition, lvArg0Var should remain 0 if arg0 is not // written to or address-exposed. assert(compThisArgAddrExposedOK && !lvaTable[info.compThisArg].lvHasILStoreOp && (lvaArg0Var == info.compThisArg || (lvaArg0Var != info.compThisArg && (lvaTable[lvaArg0Var].IsAddressExposed() || lvaTable[lvaArg0Var].lvHasILStoreOp || copiedForGenericsCtxt)))); } } //------------------------------------------------------------------------ // fgDebugCheckFlags: Validate various invariants related to the propagation // and setting of tree flags ("gtFlags"). // // Arguments: // tree - the tree to (recursively) check the flags for // void Compiler::fgDebugCheckFlags(GenTree* tree) { GenTreeFlags actualFlags = tree->gtFlags & GTF_ALL_EFFECT; GenTreeFlags expectedFlags = GTF_EMPTY; if (tree->OperMayThrow(this)) { expectedFlags |= GTF_EXCEPT; } if (tree->OperRequiresAsgFlag()) { expectedFlags |= GTF_ASG; } if (tree->OperRequiresCallFlag(this)) { expectedFlags |= GTF_CALL; } // We reuse GTF_REVERSE_OPS as GTF_VAR_ARR_INDEX for LCL_VAR nodes. 
if (((tree->gtFlags & GTF_REVERSE_OPS) != 0) && !tree->OperIs(GT_LCL_VAR)) { assert(tree->OperSupportsReverseOpEvalOrder(this)); } GenTree* op1 = tree->OperIsSimple() ? tree->gtGetOp1() : nullptr; switch (tree->OperGet()) { case GT_CLS_VAR: expectedFlags |= GTF_GLOB_REF; break; case GT_CATCH_ARG: expectedFlags |= GTF_ORDER_SIDEEFF; break; case GT_MEMORYBARRIER: expectedFlags |= (GTF_GLOB_REF | GTF_ASG); break; case GT_LCL_VAR: assert((tree->gtFlags & GTF_VAR_FOLDED_IND) == 0); break; case GT_QMARK: assert(!op1->CanCSE()); assert(op1->OperIsCompare() || op1->IsIntegralConst(0) || op1->IsIntegralConst(1)); break; case GT_ASG: case GT_ADDR: // Note that this is a weak check - the "op1" location node can be a COMMA. assert(!op1->CanCSE()); break; case GT_IND: // Do we have a constant integer address as op1 that is also a handle? if (op1->IsCnsIntOrI() && op1->IsIconHandle()) { if ((tree->gtFlags & GTF_IND_INVARIANT) != 0) { actualFlags |= GTF_IND_INVARIANT; } if ((tree->gtFlags & GTF_IND_NONFAULTING) != 0) { actualFlags |= GTF_IND_NONFAULTING; } GenTreeFlags handleKind = op1->GetIconHandleFlag(); // Some of these aren't handles to invariant data... if ((handleKind == GTF_ICON_STATIC_HDL) || // Pointer to a mutable class Static variable (handleKind == GTF_ICON_BBC_PTR) || // Pointer to a mutable basic block count value (handleKind == GTF_ICON_GLOBAL_PTR)) // Pointer to mutable data from the VM state { // For statics, we expect the GTF_GLOB_REF to be set. However, we currently // fail to set it in a number of situations, and so this check is disabled. // TODO: enable checking of GTF_GLOB_REF. // expectedFlags |= GTF_GLOB_REF; } else // All the other handle indirections are considered invariant { expectedFlags |= GTF_IND_INVARIANT; } // Currently we expect all indirections with constant addresses to be nonfaulting. expectedFlags |= GTF_IND_NONFAULTING; } break; case GT_CALL: GenTreeCall* call; call = tree->AsCall(); if ((call->gtCallThisArg != nullptr) && ((call->gtCallThisArg->GetNode()->gtFlags & GTF_ASG) != 0)) { // TODO-Cleanup: this is a patch for a violation in our GT_ASG propagation. // see https://github.com/dotnet/runtime/issues/13758 actualFlags |= GTF_ASG; } for (GenTreeCall::Use& use : call->Args()) { if ((use.GetNode()->gtFlags & GTF_ASG) != 0) { // TODO-Cleanup: this is a patch for a violation in our GT_ASG propagation. // see https://github.com/dotnet/runtime/issues/13758 actualFlags |= GTF_ASG; } } for (GenTreeCall::Use& use : call->LateArgs()) { if ((use.GetNode()->gtFlags & GTF_ASG) != 0) { // TODO-Cleanup: this is a patch for a violation in our GT_ASG propagation. // see https://github.com/dotnet/runtime/issues/13758 actualFlags |= GTF_ASG; } } if (call->IsUnmanaged() && ((call->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL) != 0)) { if (call->gtCallArgs->GetNode()->OperGet() == GT_NOP) { assert(call->gtCallLateArgs->GetNode()->TypeIs(TYP_I_IMPL, TYP_BYREF)); } else { assert(call->gtCallArgs->GetNode()->TypeIs(TYP_I_IMPL, TYP_BYREF)); } } break; case GT_CMPXCHG: expectedFlags |= (GTF_GLOB_REF | GTF_ASG); break; default: break; } tree->VisitOperands([&](GenTree* operand) -> GenTree::VisitResult { // ASGs are nodes that produce no value, but have a type (essentially, the type of the location). // Validate that nodes that parent ASGs do not consume values. This check also ensures that code // which updates location types ("gsParamsToShadows" replaces small LCL_VARs with TYP_INT ones) // does not have to worry about propagating the new type "up the tree". 
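        // As an illustrative example: an operand tree like "COMMA(ASG(lclX, 1), lclY)", as produced by
        // gtExtractSideEffList, is accepted by the check below, whereas a value-consuming parent such
        // as "ADD(ASG(lclX, 1), lclY)" would trip the assert. (lclX/lclY are placeholder locals.)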
// // Uncoditionally allowing COMMA here weakens the assert, but is necessary because the compiler // ("gtExtractSideEffList") can create "typed" "comma lists" with ASGs as second operands. // if (operand->OperIs(GT_ASG)) { assert(tree->IsCall() || tree->OperIs(GT_COMMA)); } fgDebugCheckFlags(operand); expectedFlags |= (operand->gtFlags & GTF_ALL_EFFECT); return GenTree::VisitResult::Continue; }); // ADDR nodes break the "parent flags >= operands flags" invariant for GTF_GLOB_REF. if (tree->OperIs(GT_ADDR) && op1->OperIs(GT_LCL_VAR, GT_LCL_FLD, GT_CLS_VAR)) { expectedFlags &= ~GTF_GLOB_REF; } fgDebugCheckFlagsHelper(tree, actualFlags, expectedFlags); } //------------------------------------------------------------------------------ // fgDebugCheckDispFlags: Wrapper function that displays GTF_IND_ flags // and then calls gtDispFlags to display the rest. // // Arguments: // tree - Tree whose flags are being checked // dispFlags - the first argument for gtDispFlags (flags to display), // including GTF_IND_INVARIANT, GTF_IND_NONFAULTING, GTF_IND_NONNULL // debugFlags - the second argument to gtDispFlags // void Compiler::fgDebugCheckDispFlags(GenTree* tree, GenTreeFlags dispFlags, GenTreeDebugFlags debugFlags) { if (tree->OperGet() == GT_IND) { printf("%c", (dispFlags & GTF_IND_INVARIANT) ? '#' : '-'); printf("%c", (dispFlags & GTF_IND_NONFAULTING) ? 'n' : '-'); printf("%c", (dispFlags & GTF_IND_NONNULL) ? '@' : '-'); } GenTree::gtDispFlags(dispFlags, debugFlags); } //------------------------------------------------------------------------------ // fgDebugCheckFlagsHelper : Check if all bits that are set in chkFlags are also set in treeFlags. // // Arguments: // tree - Tree whose flags are being checked // actualFlags - Actual flags on the tree // expectedFlags - Expected flags // void Compiler::fgDebugCheckFlagsHelper(GenTree* tree, GenTreeFlags actualFlags, GenTreeFlags expectedFlags) { if (expectedFlags & ~actualFlags) { // Print the tree so we can see it in the log. printf("Missing flags on tree [%06d]: ", dspTreeID(tree)); Compiler::fgDebugCheckDispFlags(tree, expectedFlags & ~actualFlags, GTF_DEBUG_NONE); printf("\n"); gtDispTree(tree); noway_assert(!"Missing flags on tree"); // Print the tree again so we can see it right after we hook up the debugger. printf("Missing flags on tree [%06d]: ", dspTreeID(tree)); Compiler::fgDebugCheckDispFlags(tree, expectedFlags & ~actualFlags, GTF_DEBUG_NONE); printf("\n"); gtDispTree(tree); } else if (actualFlags & ~expectedFlags) { // We can't/don't consider these flags (GTF_GLOB_REF or GTF_ORDER_SIDEEFF) as being "extra" flags // GenTreeFlags flagsToCheck = ~GTF_GLOB_REF & ~GTF_ORDER_SIDEEFF; if ((actualFlags & ~expectedFlags & flagsToCheck) != 0) { // Print the tree so we can see it in the log. printf("Extra flags on tree [%06d]: ", dspTreeID(tree)); Compiler::fgDebugCheckDispFlags(tree, actualFlags & ~expectedFlags, GTF_DEBUG_NONE); printf("\n"); gtDispTree(tree); noway_assert(!"Extra flags on tree"); // Print the tree again so we can see it right after we hook up the debugger. printf("Extra flags on tree [%06d]: ", dspTreeID(tree)); Compiler::fgDebugCheckDispFlags(tree, actualFlags & ~expectedFlags, GTF_DEBUG_NONE); printf("\n"); gtDispTree(tree); } } } // DEBUG routine to check correctness of the internal gtNext, gtPrev threading of a statement. // This threading is only valid when fgStmtListThreaded is true. // This calls an alternate method for FGOrderLinear. 
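// As a concrete illustration of the invariant verified below: for a binary node such as ADD(a, b)
// with GTF_REVERSE_OPS clear, the execution-order threading is a, b, ADD, so the ADD node's gtPrev
// is expected to be 'b'; with GTF_REVERSE_OPS set the order is b, a, ADD and the expected gtPrev
// is 'a'. (The names 'a' and 'b' here are just placeholders for the operand trees.)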
void Compiler::fgDebugCheckNodeLinks(BasicBlock* block, Statement* stmt) { // LIR blocks are checked using BasicBlock::CheckLIR(). if (block->IsLIR()) { LIR::AsRange(block).CheckLIR(this); // TODO: return? } assert(fgStmtListThreaded); noway_assert(stmt->GetTreeList()); // The first node's gtPrev must be nullptr (the gtPrev list is not circular). // The last node's gtNext must be nullptr (the gtNext list is not circular). This is tested if the loop below // terminates. assert(stmt->GetTreeList()->gtPrev == nullptr); for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext) { if (tree->gtPrev) { noway_assert(tree->gtPrev->gtNext == tree); } else { noway_assert(tree == stmt->GetTreeList()); } if (tree->gtNext) { noway_assert(tree->gtNext->gtPrev == tree); } else { noway_assert(tree == stmt->GetRootNode()); } /* Cross-check gtPrev,gtNext with GetOp() for simple trees */ GenTree* expectedPrevTree = nullptr; if (tree->OperIsLeaf()) { if (tree->gtOper == GT_CATCH_ARG) { // The GT_CATCH_ARG should always have GTF_ORDER_SIDEEFF set noway_assert(tree->gtFlags & GTF_ORDER_SIDEEFF); // The GT_CATCH_ARG has to be the first thing evaluated noway_assert(stmt == block->FirstNonPhiDef()); noway_assert(stmt->GetTreeList()->gtOper == GT_CATCH_ARG); // The root of the tree should have GTF_ORDER_SIDEEFF set noway_assert(stmt->GetRootNode()->gtFlags & GTF_ORDER_SIDEEFF); } } if (tree->OperIsUnary() && tree->AsOp()->gtOp1) { expectedPrevTree = tree->AsOp()->gtOp1; } else if (tree->OperIsBinary() && tree->AsOp()->gtOp1) { switch (tree->gtOper) { case GT_QMARK: // "then" operand of the GT_COLON (generated second). expectedPrevTree = tree->AsOp()->gtOp2->AsColon()->ThenNode(); break; case GT_COLON: expectedPrevTree = tree->AsColon()->ElseNode(); // "else" branch result (generated first). break; default: if (tree->AsOp()->gtOp2) { if (tree->gtFlags & GTF_REVERSE_OPS) { expectedPrevTree = tree->AsOp()->gtOp1; } else { expectedPrevTree = tree->AsOp()->gtOp2; } } else { expectedPrevTree = tree->AsOp()->gtOp1; } break; } } noway_assert(expectedPrevTree == nullptr || // No expectations about the prev node tree->gtPrev == expectedPrevTree); // The "normal" case } } /***************************************************************************** * * A DEBUG routine to check the correctness of the links between statements * and ordinary nodes within a statement. * ****************************************************************************/ void Compiler::fgDebugCheckLinks(bool morphTrees) { // This used to be only on for stress, and there was a comment stating that // it was "quite an expensive operation" but I did not find that to be true. // Set DO_SANITY_DEBUG_CHECKS to false to revert to that behavior. const bool DO_SANITY_DEBUG_CHECKS = true; if (!DO_SANITY_DEBUG_CHECKS && !compStressCompile(STRESS_CHK_FLOW_UPDATE, 30)) { return; } fgDebugCheckBlockLinks(); // For each block check the links between the trees. 
    for (BasicBlock* const block : Blocks())
    {
        if (block->IsLIR())
        {
            LIR::AsRange(block).CheckLIR(this);
        }
        else
        {
            fgDebugCheckStmtsList(block, morphTrees);
        }
    }

    fgDebugCheckNodesUniqueness();
}

//------------------------------------------------------------------------------
// fgDebugCheckStmtsList : Performs the following checks:
//  - all statements in the block are linked correctly
//  - check statement flags
//  - check nodes' gtNext and gtPrev values, if the node list is threaded
//
// Arguments:
//   block  - the block to check statements in
//   morphTrees - try to morph trees in the checker
//
// Note:
//   Checking that all bits that are set in treeFlags are also set in chkFlags is currently disabled.

void Compiler::fgDebugCheckStmtsList(BasicBlock* block, bool morphTrees)
{
    for (Statement* const stmt : block->Statements())
    {
        // Verify that bbStmtList is threaded correctly.
        // Note that for the statements list, the GetPrevStmt() list is circular.
        // The GetNextStmt() list is not: GetNextStmt() of the last statement in a block is nullptr.

        noway_assert(stmt->GetPrevStmt() != nullptr);

        if (stmt == block->bbStmtList)
        {
            noway_assert(stmt->GetPrevStmt()->GetNextStmt() == nullptr);
        }
        else
        {
            noway_assert(stmt->GetPrevStmt()->GetNextStmt() == stmt);
        }

        if (stmt->GetNextStmt() != nullptr)
        {
            noway_assert(stmt->GetNextStmt()->GetPrevStmt() == stmt);
        }
        else
        {
            noway_assert(block->lastStmt() == stmt);
        }

        /* For each statement check that the exception flags are properly set */

        noway_assert(stmt->GetRootNode());

        if (verbose && 0)
        {
            gtDispTree(stmt->GetRootNode());
        }

        fgDebugCheckFlags(stmt->GetRootNode());

        // Not only will this stress fgMorphBlockStmt(), but we also get all the checks
        // done by fgMorphTree()

        if (morphTrees)
        {
            // If 'stmt' is removed from the block, restart the check for the current block
            // and break out of the current one.
            if (fgMorphBlockStmt(block, stmt DEBUGARG("test morphing")))
            {
                fgDebugCheckStmtsList(block, morphTrees);
                break;
            }
        }

        // For each statement check that the nodes are threaded correctly - m_treeList.
        if (fgStmtListThreaded)
        {
            fgDebugCheckNodeLinks(block, stmt);
        }
    }
}

// ensure that bbNext and bbPrev are consistent
void Compiler::fgDebugCheckBlockLinks()
{
    assert(fgFirstBB->bbPrev == nullptr);

    for (BasicBlock* const block : Blocks())
    {
        if (block->bbNext)
        {
            assert(block->bbNext->bbPrev == block);
        }
        else
        {
            assert(block == fgLastBB);
        }

        if (block->bbPrev)
        {
            assert(block->bbPrev->bbNext == block);
        }
        else
        {
            assert(block == fgFirstBB);
        }

        // If this is a switch, check that the tables are consistent.
        // Note that we don't call GetSwitchDescMap(), because it has the side-effect
        // of allocating it if it is not present.
        if (block->bbJumpKind == BBJ_SWITCH && m_switchDescMap != nullptr)
        {
            SwitchUniqueSuccSet uniqueSuccSet;
            if (m_switchDescMap->Lookup(block, &uniqueSuccSet))
            {
                // Create a set with all the successors. Don't use BlockSet, so we don't need to worry
                // about the BlockSet epoch.
                BitVecTraits bitVecTraits(fgBBNumMax + 1, this);
                BitVec       succBlocks(BitVecOps::MakeEmpty(&bitVecTraits));
                for (BasicBlock* const bTarget : block->SwitchTargets())
                {
                    BitVecOps::AddElemD(&bitVecTraits, succBlocks, bTarget->bbNum);
                }
                // Now we should have a set of unique successors that matches what's in the switchMap.
                // First, check the number of entries, then make sure all the blocks in uniqueSuccSet
                // are in the BlockSet.
                unsigned count = BitVecOps::Count(&bitVecTraits, succBlocks);
                assert(uniqueSuccSet.numDistinctSuccs == count);
                for (unsigned i = 0; i < uniqueSuccSet.numDistinctSuccs; i++)
                {
                    assert(BitVecOps::IsMember(&bitVecTraits, succBlocks, uniqueSuccSet.nonDuplicates[i]->bbNum));
                }
            }
        }
    }
}

// UniquenessCheckWalker keeps the data that is necessary to check
// that each tree has its own unique id and that ids do not repeat.
class UniquenessCheckWalker
{
public:
    UniquenessCheckWalker(Compiler* comp)
        : comp(comp), nodesVecTraits(comp->compGenTreeID, comp), uniqueNodes(BitVecOps::MakeEmpty(&nodesVecTraits))
    {
    }

    //------------------------------------------------------------------------
    // MarkTreeId: Visit all subtrees in the tree and check gtTreeIDs.
    //
    // Arguments:
    //    pTree     - Pointer to the tree to walk
    //    fgWalkPre - the walk data; its pCallbackData is the UniquenessCheckWalker instance
    //
    static Compiler::fgWalkResult MarkTreeId(GenTree** pTree, Compiler::fgWalkData* fgWalkPre)
    {
        UniquenessCheckWalker* walker   = static_cast<UniquenessCheckWalker*>(fgWalkPre->pCallbackData);
        unsigned               gtTreeID = (*pTree)->gtTreeID;
        walker->CheckTreeId(gtTreeID);
        return Compiler::WALK_CONTINUE;
    }

    //------------------------------------------------------------------------
    // CheckTreeId: Check that this tree was not visited before and memorize it as visited.
    //
    // Arguments:
    //    gtTreeID - identifier of the GenTree.
    //
    // Note:
    //    This method causes an assert failure when we find a duplicated node in our tree
    //
    void CheckTreeId(unsigned gtTreeID)
    {
        if (BitVecOps::IsMember(&nodesVecTraits, uniqueNodes, gtTreeID))
        {
            if (comp->verbose)
            {
                printf("Duplicate gtTreeID was found: %d\n", gtTreeID);
            }
            assert(!"Duplicate gtTreeID was found");
        }
        else
        {
            BitVecOps::AddElemD(&nodesVecTraits, uniqueNodes, gtTreeID);
        }
    }

private:
    Compiler*    comp;
    BitVecTraits nodesVecTraits;
    BitVec       uniqueNodes;
};

//------------------------------------------------------------------------------
// fgDebugCheckNodesUniqueness: Check that each tree in the method has its own unique gtTreeId.
//
void Compiler::fgDebugCheckNodesUniqueness()
{
    UniquenessCheckWalker walker(this);

    for (BasicBlock* const block : Blocks())
    {
        if (block->IsLIR())
        {
            for (GenTree* i : LIR::AsRange(block))
            {
                walker.CheckTreeId(i->gtTreeID);
            }
        }
        else
        {
            for (Statement* const stmt : block->Statements())
            {
                GenTree* root = stmt->GetRootNode();
                fgWalkTreePre(&root, UniquenessCheckWalker::MarkTreeId, &walker);
            }
        }
    }
}

//------------------------------------------------------------------------------
// fgDebugCheckLoopTable: checks that the loop table is valid.
//    - If the method has natural loops, the loop table is not null
//    - Loop `top` must come before `bottom`.
//    - Loop `entry` must be between `top` and `bottom`.
//    - Child loops of a loop are disjoint.
//    - All basic blocks with loop numbers set have a corresponding loop in the table
//    - All basic blocks without a loop number are not in a loop
//    - All parent loops of a block's innermost loop also contain that block
//    - If the loop has a pre-header, it is valid
//    - The loop flags are valid
//
void Compiler::fgDebugCheckLoopTable()
{
#ifdef DEBUG
    if (verbose)
    {
        printf("*************** In fgDebugCheckLoopTable\n");
    }
#endif // DEBUG

    if (optLoopCount > 0)
    {
        assert(optLoopTable != nullptr);
    }

    // Build a mapping from existing block list number (bbNum) to the block number it would be after the
    // blocks are renumbered. This allows making asserts about the relative ordering of blocks using block number
    // without actually renumbering the blocks, which would affect non-DEBUG code paths.
Note that there may be // `blockNumMap[bbNum] == 0` if the `bbNum` block was deleted and blocks haven't been renumbered since // the deletion. unsigned bbNumMax = impInlineRoot()->fgBBNumMax; // blockNumMap[old block number] => new block number size_t blockNumBytes = (bbNumMax + 1) * sizeof(unsigned); unsigned* blockNumMap = (unsigned*)_alloca(blockNumBytes); memset(blockNumMap, 0, blockNumBytes); unsigned newBBnum = 1; for (BasicBlock* const block : Blocks()) { if ((block->bbFlags & BBF_REMOVED) == 0) { assert(1 <= block->bbNum && block->bbNum <= bbNumMax); assert(blockNumMap[block->bbNum] == 0); // If this fails, we have two blocks with the same block number. blockNumMap[block->bbNum] = newBBnum++; } } struct MappedChecks { static bool lpWellFormed(const unsigned* blockNumMap, const LoopDsc* loop) { return (blockNumMap[loop->lpTop->bbNum] <= blockNumMap[loop->lpEntry->bbNum]) && (blockNumMap[loop->lpEntry->bbNum] <= blockNumMap[loop->lpBottom->bbNum]) && ((blockNumMap[loop->lpHead->bbNum] < blockNumMap[loop->lpTop->bbNum]) || (blockNumMap[loop->lpHead->bbNum] > blockNumMap[loop->lpBottom->bbNum])); } static bool lpContains(const unsigned* blockNumMap, const LoopDsc* loop, const BasicBlock* blk) { return (blockNumMap[loop->lpTop->bbNum] <= blockNumMap[blk->bbNum]) && (blockNumMap[blk->bbNum] <= blockNumMap[loop->lpBottom->bbNum]); } static bool lpContains(const unsigned* blockNumMap, const LoopDsc* loop, const BasicBlock* top, const BasicBlock* bottom) { return (blockNumMap[loop->lpTop->bbNum] <= blockNumMap[top->bbNum]) && (blockNumMap[bottom->bbNum] < blockNumMap[loop->lpBottom->bbNum]); } static bool lpContains(const unsigned* blockNumMap, const LoopDsc* loop, const LoopDsc& lp2) { return lpContains(blockNumMap, loop, lp2.lpTop, lp2.lpBottom); } static bool lpContainedBy(const unsigned* blockNumMap, const LoopDsc* loop, const BasicBlock* top, const BasicBlock* bottom) { return (blockNumMap[top->bbNum] <= blockNumMap[loop->lpTop->bbNum]) && (blockNumMap[loop->lpBottom->bbNum] < blockNumMap[bottom->bbNum]); } static bool lpContainedBy(const unsigned* blockNumMap, const LoopDsc* loop, const LoopDsc& lp2) { return lpContainedBy(blockNumMap, loop, lp2.lpTop, lp2.lpBottom); } static bool lpDisjoint(const unsigned* blockNumMap, const LoopDsc* loop, const BasicBlock* top, const BasicBlock* bottom) { return (blockNumMap[bottom->bbNum] < blockNumMap[loop->lpTop->bbNum]) || (blockNumMap[loop->lpBottom->bbNum] < blockNumMap[top->bbNum]); } static bool lpDisjoint(const unsigned* blockNumMap, const LoopDsc* loop, const LoopDsc& lp2) { return lpDisjoint(blockNumMap, loop, lp2.lpTop, lp2.lpBottom); } }; // Check the loop table itself. int preHeaderCount = 0; for (unsigned i = 0; i < optLoopCount; i++) { const LoopDsc& loop = optLoopTable[i]; // Ignore removed loops if (loop.lpFlags & LPFLG_REMOVED) { continue; } assert(loop.lpHead != nullptr); assert(loop.lpTop != nullptr); assert(loop.lpEntry != nullptr); assert(loop.lpBottom != nullptr); assert(MappedChecks::lpWellFormed(blockNumMap, &loop)); if (loop.lpExitCnt == 1) { assert(loop.lpExit != nullptr); assert(MappedChecks::lpContains(blockNumMap, &loop, loop.lpExit)); } else { assert(loop.lpExit == nullptr); } if (loop.lpParent == BasicBlock::NOT_IN_LOOP) { // This is a top-level loop. // Verify all top-level loops are disjoint. We don't have a list of just these (such as a // top-level pseudo-loop entry with a list of all top-level lists), so we have to iterate // over the entire loop table. 
for (unsigned j = 0; j < optLoopCount; j++) { if (i == j) { // Don't compare against ourselves. continue; } const LoopDsc& otherLoop = optLoopTable[j]; if (otherLoop.lpFlags & LPFLG_REMOVED) { continue; } if (otherLoop.lpParent != BasicBlock::NOT_IN_LOOP) { // Only consider top-level loops continue; } assert(MappedChecks::lpDisjoint(blockNumMap, &loop, otherLoop)); } } else { // This is not a top-level loop assert(loop.lpParent != BasicBlock::NOT_IN_LOOP); assert(loop.lpParent < optLoopCount); assert(loop.lpParent < i); // outer loops come before inner loops in the table const LoopDsc& parentLoop = optLoopTable[loop.lpParent]; assert((parentLoop.lpFlags & LPFLG_REMOVED) == 0); // don't allow removed parent loop? assert(MappedChecks::lpContainedBy(blockNumMap, &loop, optLoopTable[loop.lpParent])); } if (loop.lpChild != BasicBlock::NOT_IN_LOOP) { // Verify all child loops are contained in the parent loop. for (unsigned child = loop.lpChild; // child != BasicBlock::NOT_IN_LOOP; // child = optLoopTable[child].lpSibling) { assert(child < optLoopCount); assert(i < child); // outer loops come before inner loops in the table const LoopDsc& childLoop = optLoopTable[child]; if (childLoop.lpFlags & LPFLG_REMOVED) // removed child loop might still be in table { continue; } assert(MappedChecks::lpContains(blockNumMap, &loop, childLoop)); assert(childLoop.lpParent == i); } // Verify all child loops are disjoint. for (unsigned child = loop.lpChild; // child != BasicBlock::NOT_IN_LOOP; // child = optLoopTable[child].lpSibling) { const LoopDsc& childLoop = optLoopTable[child]; if (childLoop.lpFlags & LPFLG_REMOVED) { continue; } for (unsigned child2 = optLoopTable[child].lpSibling; // child2 != BasicBlock::NOT_IN_LOOP; // child2 = optLoopTable[child2].lpSibling) { const LoopDsc& child2Loop = optLoopTable[child2]; if (child2Loop.lpFlags & LPFLG_REMOVED) { continue; } assert(MappedChecks::lpDisjoint(blockNumMap, &childLoop, child2Loop)); } } } // If the loop has a pre-header, ensure the pre-header form is correct. if ((loop.lpFlags & LPFLG_HAS_PREHEAD) != 0) { ++preHeaderCount; BasicBlock* h = loop.lpHead; assert(h->bbFlags & BBF_LOOP_PREHEADER); // The pre-header can only be BBJ_ALWAYS or BBJ_NONE and must enter the loop. BasicBlock* e = loop.lpEntry; if (h->bbJumpKind == BBJ_ALWAYS) { assert(h->bbJumpDest == e); } else { assert(h->bbJumpKind == BBJ_NONE); assert(h->bbNext == e); assert(loop.lpTop == e); assert(loop.lpIsTopEntry()); } // The entry block has a single non-loop predecessor, and it is the pre-header. for (BasicBlock* const predBlock : e->PredBlocks()) { if (predBlock != h) { assert(MappedChecks::lpContains(blockNumMap, &loop, predBlock)); } } loop.lpValidatePreHeader(); } // Check the flags. // Note that the various init/limit flags are only used when LPFLG_ITER is set, but they are set first, // separately, and only if everything works out is LPFLG_ITER set. If LPFLG_ITER is NOT set, the // individual flags are not un-set (arguably, they should be). // Only one of the `init` flags can be set. assert(genCountBits((unsigned)(loop.lpFlags & (LPFLG_VAR_INIT | LPFLG_CONST_INIT))) <= 1); // Only one of the `limit` flags can be set. (Note that LPFLG_SIMD_LIMIT is a "sub-flag" that can be // set when LPFLG_CONST_LIMIT is set.) assert(genCountBits((unsigned)(loop.lpFlags & (LPFLG_VAR_LIMIT | LPFLG_CONST_LIMIT | LPFLG_ARRLEN_LIMIT))) <= 1); // LPFLG_SIMD_LIMIT can only be set if LPFLG_CONST_LIMIT is set. 
if (loop.lpFlags & LPFLG_SIMD_LIMIT) { assert(loop.lpFlags & LPFLG_CONST_LIMIT); } if (loop.lpFlags & (LPFLG_CONST_INIT | LPFLG_VAR_INIT)) { assert(loop.lpInitBlock != nullptr); if (loop.lpFlags & LPFLG_VAR_INIT) { assert(loop.lpVarInit < lvaCount); } } if (loop.lpFlags & LPFLG_ITER) { loop.VERIFY_lpIterTree(); loop.VERIFY_lpTestTree(); } } // Check basic blocks for loop annotations. for (BasicBlock* const block : Blocks()) { if (optLoopCount == 0) { assert(block->bbNatLoopNum == BasicBlock::NOT_IN_LOOP); continue; } // Walk the loop table and find the first loop that contains our block. // It should be the innermost one. int loopNum = BasicBlock::NOT_IN_LOOP; for (int i = optLoopCount - 1; i >= 0; i--) { // Ignore removed loops if (optLoopTable[i].lpFlags & LPFLG_REMOVED) { continue; } // Does this loop contain our block? if (MappedChecks::lpContains(blockNumMap, &optLoopTable[i], block)) { loopNum = i; break; } } // If there is at least one loop that contains this block... if (loopNum != BasicBlock::NOT_IN_LOOP) { // ...it must be the one pointed to by bbNatLoopNum. assert(block->bbNatLoopNum == loopNum); // TODO: We might want the following assert, but there are cases where we don't move all // return blocks out of the loop. // Return blocks are not allowed inside a loop; they should have been moved elsewhere. // assert(block->bbJumpKind != BBJ_RETURN); } else { // Otherwise, this block should not point to a loop. assert(block->bbNatLoopNum == BasicBlock::NOT_IN_LOOP); } // All loops that contain the innermost loop with this block must also contain this block. while (loopNum != BasicBlock::NOT_IN_LOOP) { assert(MappedChecks::lpContains(blockNumMap, &optLoopTable[loopNum], block)); loopNum = optLoopTable[loopNum].lpParent; } if (block->bbFlags & BBF_LOOP_PREHEADER) { // Note that the bbNatLoopNum will not point to the loop where this is a pre-header, since bbNatLoopNum // is only set on the blocks from `top` to `bottom`, and `head` is outside that. --preHeaderCount; } } // Verify that the number of loops marked as having pre-headers is the same as the number of blocks // with the pre-header flag set. assert(preHeaderCount == 0); } /*****************************************************************************/ #endif // DEBUG
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "allocacheck.h" // for alloca // Flowgraph Check and Dump Support #ifdef DEBUG void Compiler::fgPrintEdgeWeights() { // Print out all of the edge weights for (BasicBlock* const bDst : Blocks()) { if (bDst->bbPreds != nullptr) { printf(" Edge weights into " FMT_BB " :", bDst->bbNum); for (flowList* const edge : bDst->PredEdges()) { BasicBlock* bSrc = edge->getBlock(); // This is the control flow edge (bSrc -> bDst) printf(FMT_BB " ", bSrc->bbNum); if (edge->edgeWeightMin() < BB_MAX_WEIGHT) { printf("(%f", edge->edgeWeightMin()); } else { printf("(MAX"); } if (edge->edgeWeightMin() != edge->edgeWeightMax()) { if (edge->edgeWeightMax() < BB_MAX_WEIGHT) { printf("..%f", edge->edgeWeightMax()); } else { printf("..MAX"); } } printf(")"); if (edge->flNext != nullptr) { printf(", "); } } printf("\n"); } } } #endif // DEBUG /***************************************************************************** * Check that the flow graph is really updated */ #ifdef DEBUG void Compiler::fgDebugCheckUpdate() { if (!compStressCompile(STRESS_CHK_FLOW_UPDATE, 30)) { return; } /* We check for these conditions: * no unreachable blocks -> no blocks have countOfInEdges() = 0 * no empty blocks -> !block->isEmpty(), unless non-removable or multiple in-edges * no un-imported blocks -> no blocks have BBF_IMPORTED not set (this is * kind of redundand with the above, but to make sure) * no un-compacted blocks -> BBJ_NONE followed by block with no jumps to it (countOfInEdges() = 1) */ BasicBlock* prev; BasicBlock* block; for (prev = nullptr, block = fgFirstBB; block != nullptr; prev = block, block = block->bbNext) { /* no unreachable blocks */ if ((block->countOfInEdges() == 0) && !(block->bbFlags & BBF_DONT_REMOVE) #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // With funclets, we never get rid of the BBJ_ALWAYS part of a BBJ_CALLFINALLY/BBJ_ALWAYS pair, // even if we can prove that the finally block never returns. 
&& !block->isBBCallAlwaysPairTail() #endif // FEATURE_EH_FUNCLETS ) { noway_assert(!"Unreachable block not removed!"); } /* no empty blocks */ if (block->isEmpty() && !(block->bbFlags & BBF_DONT_REMOVE)) { switch (block->bbJumpKind) { case BBJ_CALLFINALLY: case BBJ_EHFINALLYRET: case BBJ_EHFILTERRET: case BBJ_RETURN: /* for BBJ_ALWAYS is probably just a GOTO, but will have to be treated */ case BBJ_ALWAYS: case BBJ_EHCATCHRET: /* These jump kinds are allowed to have empty tree lists */ break; default: /* it may be the case that the block had more than one reference to it * so we couldn't remove it */ if (block->countOfInEdges() == 0) { noway_assert(!"Empty block not removed!"); } break; } } /* no un-imported blocks */ if (!(block->bbFlags & BBF_IMPORTED)) { /* internal blocks do not count */ if (!(block->bbFlags & BBF_INTERNAL)) { noway_assert(!"Non IMPORTED block not removed!"); } } bool prevIsCallAlwaysPair = block->isBBCallAlwaysPairTail(); // Check for an unnecessary jumps to the next block bool doAssertOnJumpToNextBlock = false; // unless we have a BBJ_COND or BBJ_ALWAYS we can not assert if (block->bbJumpKind == BBJ_COND) { // A conditional branch should never jump to the next block // as it can be folded into a BBJ_NONE; doAssertOnJumpToNextBlock = true; } else if (block->bbJumpKind == BBJ_ALWAYS) { // Generally we will want to assert if a BBJ_ALWAYS branches to the next block doAssertOnJumpToNextBlock = true; // If the BBF_KEEP_BBJ_ALWAYS flag is set we allow it to jump to the next block if (block->bbFlags & BBF_KEEP_BBJ_ALWAYS) { doAssertOnJumpToNextBlock = false; } // A call/always pair is also allowed to jump to the next block if (prevIsCallAlwaysPair) { doAssertOnJumpToNextBlock = false; } // We are allowed to have a branch from a hot 'block' to a cold 'bbNext' // if ((block->bbNext != nullptr) && fgInDifferentRegions(block, block->bbNext)) { doAssertOnJumpToNextBlock = false; } } if (doAssertOnJumpToNextBlock) { if (block->bbJumpDest == block->bbNext) { noway_assert(!"Unnecessary jump to the next block!"); } } /* Make sure BBF_KEEP_BBJ_ALWAYS is set correctly */ if ((block->bbJumpKind == BBJ_ALWAYS) && prevIsCallAlwaysPair) { noway_assert(block->bbFlags & BBF_KEEP_BBJ_ALWAYS); } /* For a BBJ_CALLFINALLY block we make sure that we are followed by */ /* an BBJ_ALWAYS block with BBF_INTERNAL set */ /* or that it's a BBF_RETLESS_CALL */ if (block->bbJumpKind == BBJ_CALLFINALLY) { assert((block->bbFlags & BBF_RETLESS_CALL) || block->isBBCallAlwaysPair()); } /* no un-compacted blocks */ if (fgCanCompactBlocks(block, block->bbNext)) { noway_assert(!"Found un-compacted blocks!"); } } } #endif // DEBUG #if DUMP_FLOWGRAPHS struct escapeMapping_t { char ch; const char* sub; }; // clang-format off static escapeMapping_t s_EscapeFileMapping[] = { {':', "="}, {'<', "["}, {'>', "]"}, {';', "~semi~"}, {'|', "~bar~"}, {'&', "~amp~"}, {'"', "~quot~"}, {'*', "~star~"}, {0, nullptr} }; static escapeMapping_t s_EscapeMapping[] = { {'<', "&lt;"}, {'>', "&gt;"}, {'&', "&amp;"}, {'"', "&quot;"}, {0, nullptr} }; // clang-format on const char* Compiler::fgProcessEscapes(const char* nameIn, escapeMapping_t* map) { const char* nameOut = nameIn; unsigned lengthOut; unsigned index; bool match; bool subsitutionRequired; const char* pChar; lengthOut = 1; subsitutionRequired = false; pChar = nameIn; while (*pChar != '\0') { match = false; index = 0; while (map[index].ch != 0) { if (*pChar == map[index].ch) { match = true; break; } index++; } if (match) { subsitutionRequired = true; lengthOut += 
(unsigned)strlen(map[index].sub); } else { lengthOut += 1; } pChar++; } if (subsitutionRequired) { char* newName = getAllocator(CMK_DebugOnly).allocate<char>(lengthOut); char* pDest; pDest = newName; pChar = nameIn; while (*pChar != '\0') { match = false; index = 0; while (map[index].ch != 0) { if (*pChar == map[index].ch) { match = true; break; } index++; } if (match) { strcpy(pDest, map[index].sub); pDest += strlen(map[index].sub); } else { *pDest++ = *pChar; } pChar++; } *pDest++ = '\0'; nameOut = (const char*)newName; } return nameOut; } static void fprintfDouble(FILE* fgxFile, double value) { assert(value >= 0.0); if ((value >= 0.010) || (value == 0.0)) { fprintf(fgxFile, "\"%7.3f\"", value); } else if (value >= 0.00010) { fprintf(fgxFile, "\"%7.5f\"", value); } else { fprintf(fgxFile, "\"%7E\"", value); } } //------------------------------------------------------------------------ // fgDumpTree: Dump a tree into the DOT file. Used to provide a very short, one-line, // visualization of a BBJ_COND block. // // Arguments: // fgxFile - The file we are writing to. // tree - The operand to dump. // // static void Compiler::fgDumpTree(FILE* fgxFile, GenTree* const tree) { if (tree->OperIsCompare()) { // Want to generate something like: // V01 <= 7 // V01 > V02 const char* opName = GenTree::OpName(tree->OperGet()); // Make it look nicer if we can switch (tree->OperGet()) { case GT_EQ: opName = "=="; break; case GT_NE: opName = "!="; break; case GT_LT: opName = "<"; break; case GT_LE: opName = "<="; break; case GT_GE: opName = ">="; break; case GT_GT: opName = ">"; break; default: break; } GenTree* const lhs = tree->AsOp()->gtOp1; GenTree* const rhs = tree->AsOp()->gtOp2; fgDumpTree(fgxFile, lhs); fprintf(fgxFile, " %s ", opName); fgDumpTree(fgxFile, rhs); } else if (tree->IsCnsIntOrI()) { fprintf(fgxFile, "%d", tree->AsIntCon()->gtIconVal); } else if (tree->IsCnsFltOrDbl()) { fprintf(fgxFile, "%g", tree->AsDblCon()->gtDconVal); } else if (tree->IsLocal()) { fprintf(fgxFile, "V%02u", tree->AsLclVarCommon()->GetLclNum()); } else if (tree->OperIs(GT_ARR_LENGTH)) { GenTreeArrLen* arrLen = tree->AsArrLen(); GenTree* arr = arrLen->ArrRef(); fgDumpTree(fgxFile, arr); fprintf(fgxFile, ".Length"); } else { fprintf(fgxFile, "[%s]", GenTree::OpName(tree->OperGet())); } } //------------------------------------------------------------------------ // fgOpenFlowGraphFile: Open a file to dump either the xml or dot format flow graph // // Arguments: // wbDontClose - A boolean out argument that indicates whether the caller should close the file // phase - A phase identifier to indicate which phase is associated with the dump // pos - Are we being called to dump the flow graph pre-phase or post-phase? // type - A (wide) string indicating the type of dump, "dot" or "xml" // // Notes: // The filename to use to write the data comes from the COMPlus_JitDumpFgFile or COMPlus_NgenDumpFgFile // configuration. If unset, use "default". The "type" argument is used as a filename extension, // e.g., "default.dot". // // There are several "special" filenames recognized: // "profiled" -- only create graphs for methods with profile info, one file per method. // "hot" -- only create graphs for the hot region, one file per method. // "cold" -- only create graphs for the cold region, one file per method. // "jit" -- only create graphs for JITing, one file per method. // "all" -- create graphs for all regions, one file per method. // "stdout" -- output to stdout, not a file. // "stderr" -- output to stderr, not a file. 
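//    For example (an illustrative invocation; "Main" is a placeholder method name):
//        set COMPlus_JitDumpFg=Main
//        set COMPlus_JitDumpFgFile=hot
//    dumps one graph per "Main" method, for the hot region only. COMPlus_JitDumpFgDot defaults to the dot
//    format; see fgDumpFlowGraph for the full list of related config values.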
//
// Return Value:
//    Opens a file to which a flowgraph can be dumped, whose name is based on the current
//    config values.

FILE* Compiler::fgOpenFlowGraphFile(bool* wbDontClose, Phases phase, PhasePosition pos, LPCWSTR type)
{
    FILE*       fgxFile;
    LPCWSTR     prePhasePattern  = nullptr; // pre-phase:  default (used in Release) is no pre-phase dump
    LPCWSTR     postPhasePattern = W("*");  // post-phase: default (used in Release) is dump all phases
    bool        dumpFunction     = true;    // default (used in Release) is always dump
    LPCWSTR     filename         = nullptr;
    LPCWSTR     pathname         = nullptr;
    const char* escapedString;
    bool        createDuplicateFgxFiles = true;

    if (fgBBcount <= 1)
    {
        return nullptr;
    }

#ifdef DEBUG
    if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT))
    {
        dumpFunction =
            JitConfig.NgenDumpFg().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args);
        filename = JitConfig.NgenDumpFgFile();
        pathname = JitConfig.NgenDumpFgDir();
    }
    else
    {
        dumpFunction =
            JitConfig.JitDumpFg().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args);
        filename = JitConfig.JitDumpFgFile();
        pathname = JitConfig.JitDumpFgDir();
    }

    prePhasePattern  = JitConfig.JitDumpFgPrePhase();
    postPhasePattern = JitConfig.JitDumpFgPhase();
#endif // DEBUG

    if (!dumpFunction)
    {
        return nullptr;
    }

    LPCWSTR phaseName = PhaseShortNames[phase];

    if (pos == PhasePosition::PrePhase)
    {
        if (prePhasePattern == nullptr)
        {
            // If pre-phase pattern is not specified, then don't dump for any pre-phase.
            return nullptr;
        }
        else if (*prePhasePattern != W('*'))
        {
            if (wcsstr(prePhasePattern, phaseName) == nullptr)
            {
                return nullptr;
            }
        }
    }
    else
    {
        assert(pos == PhasePosition::PostPhase);

        if (postPhasePattern == nullptr)
        {
            // There's no post-phase pattern specified. If there is a pre-phase pattern specified, then that will
            // be the only set of phases dumped. If neither is specified, then post-phase dump after
            // PHASE_DETERMINE_FIRST_COLD_BLOCK.
if (prePhasePattern != nullptr) { return nullptr; } if (phase != PHASE_DETERMINE_FIRST_COLD_BLOCK) { return nullptr; } } else if (*postPhasePattern != W('*')) { if (wcsstr(postPhasePattern, phaseName) == nullptr) { return nullptr; } } } if (filename == nullptr) { filename = W("default"); } if (wcscmp(filename, W("profiled")) == 0) { if (fgFirstBB->hasProfileWeight()) { createDuplicateFgxFiles = true; goto ONE_FILE_PER_METHOD; } else { return nullptr; } } if (wcscmp(filename, W("hot")) == 0) { if (info.compMethodInfo->regionKind == CORINFO_REGION_HOT) { createDuplicateFgxFiles = true; goto ONE_FILE_PER_METHOD; } else { return nullptr; } } else if (wcscmp(filename, W("cold")) == 0) { if (info.compMethodInfo->regionKind == CORINFO_REGION_COLD) { createDuplicateFgxFiles = true; goto ONE_FILE_PER_METHOD; } else { return nullptr; } } else if (wcscmp(filename, W("jit")) == 0) { if (info.compMethodInfo->regionKind == CORINFO_REGION_JIT) { createDuplicateFgxFiles = true; goto ONE_FILE_PER_METHOD; } else { return nullptr; } } else if (wcscmp(filename, W("all")) == 0) { createDuplicateFgxFiles = true; ONE_FILE_PER_METHOD:; escapedString = fgProcessEscapes(info.compFullName, s_EscapeFileMapping); const char* tierName = compGetTieringName(true); size_t wCharCount = strlen(escapedString) + wcslen(phaseName) + 1 + strlen("~999") + wcslen(type) + strlen(tierName) + 1; if (pathname != nullptr) { wCharCount += wcslen(pathname) + 1; } filename = (LPCWSTR)_alloca(wCharCount * sizeof(WCHAR)); if (pathname != nullptr) { swprintf_s((LPWSTR)filename, wCharCount, W("%s\\%S-%s-%S.%s"), pathname, escapedString, phaseName, tierName, type); } else { swprintf_s((LPWSTR)filename, wCharCount, W("%S.%s"), escapedString, type); } fgxFile = _wfopen(filename, W("r")); // Check if this file already exists if (fgxFile != nullptr) { // For Generic methods we will have both hot and cold versions if (createDuplicateFgxFiles == false) { fclose(fgxFile); return nullptr; } // Yes, this filename already exists, so create a different one by appending ~2, ~3, etc... for (int i = 2; i < 1000; i++) { fclose(fgxFile); if (pathname != nullptr) { swprintf_s((LPWSTR)filename, wCharCount, W("%s\\%S~%d.%s"), pathname, escapedString, i, type); } else { swprintf_s((LPWSTR)filename, wCharCount, W("%S~%d.%s"), escapedString, i, type); } fgxFile = _wfopen(filename, W("r")); // Check if this file exists if (fgxFile == nullptr) { break; } } // If we have already created 1000 files with this name then just fail if (fgxFile != nullptr) { fclose(fgxFile); return nullptr; } } fgxFile = _wfopen(filename, W("a+")); *wbDontClose = false; } else if (wcscmp(filename, W("stdout")) == 0) { fgxFile = jitstdout; *wbDontClose = true; } else if (wcscmp(filename, W("stderr")) == 0) { fgxFile = stderr; *wbDontClose = true; } else { LPCWSTR origFilename = filename; size_t wCharCount = wcslen(origFilename) + wcslen(type) + 2; if (pathname != nullptr) { wCharCount += wcslen(pathname) + 1; } filename = (LPCWSTR)_alloca(wCharCount * sizeof(WCHAR)); if (pathname != nullptr) { swprintf_s((LPWSTR)filename, wCharCount, W("%s\\%s.%s"), pathname, origFilename, type); } else { swprintf_s((LPWSTR)filename, wCharCount, W("%s.%s"), origFilename, type); } fgxFile = _wfopen(filename, W("a+")); *wbDontClose = false; } return fgxFile; } //------------------------------------------------------------------------ // fgDumpFlowGraph: Dump the xml or dot format flow graph, if enabled for this phase. 
// // Arguments: // phase - A phase identifier to indicate which phase is associated with the dump, // i.e. which phase has just completed. // pos - Are we being called to dump the flow graph pre-phase or post-phase? // // Return Value: // True iff a flowgraph has been dumped. // // Notes: // The xml dumps are the historical mechanism for dumping the flowgraph. // The dot format can be viewed by: // - https://sketchviz.com/ // - Graphviz (http://www.graphviz.org/) // - The command: // "C:\Program Files (x86)\Graphviz2.38\bin\dot.exe" -Tsvg -oFoo.svg -Kdot Foo.dot // will produce a Foo.svg file that can be opened with any svg-capable browser. // - http://rise4fun.com/Agl/ // - Cut and paste the graph from your .dot file, replacing the digraph on the page, and then click the play // button. // - It will show a rotating '/' and then render the graph in the browser. // MSAGL has also been open-sourced to https://github.com/Microsoft/automatic-graph-layout. // // Here are the config values that control it: // COMPlus_JitDumpFg A string (ala the COMPlus_JitDump string) indicating what methods to dump // flowgraphs for. // COMPlus_JitDumpFgDir A path to a directory into which the flowgraphs will be dumped. // COMPlus_JitDumpFgFile The filename to use. The default is "default.[xml|dot]". // Note that the new graphs will be appended to this file if it already exists. // COMPlus_NgenDumpFg Same as COMPlus_JitDumpFg, but for ngen compiles. // COMPlus_NgenDumpFgDir Same as COMPlus_JitDumpFgDir, but for ngen compiles. // COMPlus_NgenDumpFgFile Same as COMPlus_JitDumpFgFile, but for ngen compiles. // COMPlus_JitDumpFgPhase Phase(s) after which to dump the flowgraph. // Set to the short name of a phase to see the flowgraph after that phase. // Leave unset to dump after COLD-BLK (determine first cold block) or set to * // for all phases. // COMPlus_JitDumpFgPrePhase Phase(s) before which to dump the flowgraph. // COMPlus_JitDumpFgDot 0 for xml format, non-zero for dot format. (Default is dot format.) // COMPlus_JitDumpFgEH (dot only) 0 for no exception-handling information; non-zero to include // exception-handling regions. // COMPlus_JitDumpFgLoops (dot only) 0 for no loop information; non-zero to include loop regions. // COMPlus_JitDumpFgConstrained (dot only) 0 == don't constrain to mostly linear layout; non-zero == force // mostly lexical block linear layout. // COMPlus_JitDumpFgBlockId Display blocks with block ID, not just bbNum. // // Example: // // If you want to dump just before and after a single phase, say loop cloning, use: // set COMPlus_JitDumpFgPhase=LP-CLONE // set COMPlus_JitDumpFgPrePhase=LP-CLONE // bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) { bool result = false; bool dontClose = false; #ifdef DEBUG const bool createDotFile = JitConfig.JitDumpFgDot() != 0; const bool includeEH = (JitConfig.JitDumpFgEH() != 0) && !compIsForInlining(); // The loop table is not well maintained after the optimization phases, but there is no single point at which // it is declared invalid. For now, refuse to add loop information starting at the rationalize phase, to // avoid asserts. 
const bool includeLoops = (JitConfig.JitDumpFgLoops() != 0) && !compIsForInlining() && (phase < PHASE_RATIONALIZE); const bool constrained = JitConfig.JitDumpFgConstrained() != 0; const bool useBlockId = JitConfig.JitDumpFgBlockID() != 0; const bool displayBlockFlags = JitConfig.JitDumpFgBlockFlags() != 0; #else // !DEBUG const bool createDotFile = true; const bool includeEH = false; const bool includeLoops = false; const bool constrained = true; const bool useBlockId = false; const bool displayBlockFlags = false; #endif // !DEBUG FILE* fgxFile = fgOpenFlowGraphFile(&dontClose, phase, pos, createDotFile ? W("dot") : W("fgx")); if (fgxFile == nullptr) { return false; } JITDUMP("Dumping flow graph %s phase %s\n", (pos == PhasePosition::PrePhase) ? "before" : "after", PhaseNames[phase]); bool validWeights = fgHaveValidEdgeWeights; double weightDivisor = (double)BasicBlock::getCalledCount(this); const char* escapedString; const char* regionString = "NONE"; if (info.compMethodInfo->regionKind == CORINFO_REGION_HOT) { regionString = "HOT"; } else if (info.compMethodInfo->regionKind == CORINFO_REGION_COLD) { regionString = "COLD"; } else if (info.compMethodInfo->regionKind == CORINFO_REGION_JIT) { regionString = "JIT"; } if (createDotFile) { fprintf(fgxFile, "digraph FlowGraph {\n"); fprintf(fgxFile, " graph [label = \"%s%s\\n%s\\n%s\"];\n", info.compMethodName, compIsForInlining() ? "\\n(inlinee)" : "", (pos == PhasePosition::PrePhase) ? "before" : "after", PhaseNames[phase]); fprintf(fgxFile, " node [shape = \"Box\"];\n"); } else { fprintf(fgxFile, "<method"); escapedString = fgProcessEscapes(info.compFullName, s_EscapeMapping); fprintf(fgxFile, "\n name=\"%s\"", escapedString); escapedString = fgProcessEscapes(info.compClassName, s_EscapeMapping); fprintf(fgxFile, "\n className=\"%s\"", escapedString); escapedString = fgProcessEscapes(info.compMethodName, s_EscapeMapping); fprintf(fgxFile, "\n methodName=\"%s\"", escapedString); fprintf(fgxFile, "\n ngenRegion=\"%s\"", regionString); fprintf(fgxFile, "\n bytesOfIL=\"%d\"", info.compILCodeSize); fprintf(fgxFile, "\n localVarCount=\"%d\"", lvaCount); if (fgHaveProfileData()) { fprintf(fgxFile, "\n calledCount=\"%f\"", fgCalledCount); fprintf(fgxFile, "\n profileData=\"true\""); } if (compHndBBtabCount > 0) { fprintf(fgxFile, "\n hasEHRegions=\"true\""); } if (fgHasLoops) { fprintf(fgxFile, "\n hasLoops=\"true\""); } if (validWeights) { fprintf(fgxFile, "\n validEdgeWeights=\"true\""); if (!fgSlopUsedInEdgeWeights && !fgRangeUsedInEdgeWeights) { fprintf(fgxFile, "\n exactEdgeWeights=\"true\""); } } if (fgFirstColdBlock != nullptr) { fprintf(fgxFile, "\n firstColdBlock=\"%d\"", fgFirstColdBlock->bbNum); } fprintf(fgxFile, ">"); fprintf(fgxFile, "\n <blocks"); fprintf(fgxFile, "\n blockCount=\"%d\"", fgBBcount); fprintf(fgxFile, ">"); } // In some cases, we want to change the display based on whether an edge is lexically backwards, forwards, // or lexical successor. Also, for the region tree, using the lexical order is useful for determining where // to insert in the tree, to determine nesting. We'd like to use the bbNum to do this. However, we don't // want to renumber the blocks. So, create a mapping of bbNum to ordinal, and compare block order by // comparing the mapped ordinals instead. // // For inlinees, the max block number of the inliner is used, so we need to allocate the block map based on // that size, even though it means allocating a block map possibly much bigger than what's required for just // the inlinee blocks. 
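    // (Illustrative note, not part of the original comment: if the lexical block order is BB03, BB01, BB07,
    // the loop below yields blkMap[3] == 1, blkMap[1] == 2, blkMap[7] == 3, so comparing mapped ordinals
    // compares lexical position without renumbering the blocks.)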
unsigned blkMapSize = 1 + impInlineRoot()->fgBBNumMax; unsigned blockOrdinal = 1; unsigned* blkMap = new (this, CMK_DebugOnly) unsigned[blkMapSize]; memset(blkMap, 0, sizeof(unsigned) * blkMapSize); for (BasicBlock* const block : Blocks()) { assert(block->bbNum < blkMapSize); blkMap[block->bbNum] = blockOrdinal++; } static const char* kindImage[] = {"EHFINALLYRET", "EHFILTERRET", "EHCATCHRET", "THROW", "RETURN", "NONE", "ALWAYS", "LEAVE", "CALLFINALLY", "COND", "SWITCH"}; BasicBlock* block; for (block = fgFirstBB, blockOrdinal = 1; block != nullptr; block = block->bbNext, blockOrdinal++) { if (createDotFile) { fprintf(fgxFile, " " FMT_BB " [label = \"", block->bbNum); if (useBlockId) { fprintf(fgxFile, "%s", block->dspToString()); } else { fprintf(fgxFile, FMT_BB, block->bbNum); } if (displayBlockFlags) { // Don't display the `[` `]` unless we're going to display something. const BasicBlockFlags allDisplayedBlockFlags = BBF_TRY_BEG | BBF_FUNCLET_BEG | BBF_RUN_RARELY | BBF_LOOP_HEAD | BBF_LOOP_PREHEADER | BBF_LOOP_ALIGN; if (block->bbFlags & allDisplayedBlockFlags) { // Display a very few, useful, block flags fprintf(fgxFile, " ["); if (block->bbFlags & BBF_TRY_BEG) { fprintf(fgxFile, "T"); } if (block->bbFlags & BBF_FUNCLET_BEG) { fprintf(fgxFile, "F"); } if (block->bbFlags & BBF_RUN_RARELY) { fprintf(fgxFile, "R"); } if (block->bbFlags & BBF_LOOP_HEAD) { fprintf(fgxFile, "L"); } if (block->bbFlags & BBF_LOOP_PREHEADER) { fprintf(fgxFile, "P"); } if (block->bbFlags & BBF_LOOP_ALIGN) { fprintf(fgxFile, "A"); } fprintf(fgxFile, "]"); } } if (block->bbJumpKind == BBJ_COND) { fprintf(fgxFile, "\\n"); // Include a line with the basics of the branch condition, if possible. // Find the loop termination test at the bottom of the loop. Statement* condStmt = block->lastStmt(); if (condStmt != nullptr) { GenTree* const condTree = condStmt->GetRootNode(); noway_assert(condTree->gtOper == GT_JTRUE); GenTree* const compareTree = condTree->AsOp()->gtOp1; fgDumpTree(fgxFile, compareTree); } } // "Raw" Profile weight if (block->hasProfileWeight()) { fprintf(fgxFile, "\\n\\n%7.2f", ((double)block->getBBWeight(this)) / BB_UNITY_WEIGHT); } // end of block label fprintf(fgxFile, "\""); // other node attributes // if (block == fgFirstBB) { fprintf(fgxFile, ", shape = \"house\""); } else if (block->bbJumpKind == BBJ_RETURN) { fprintf(fgxFile, ", shape = \"invhouse\""); } else if (block->bbJumpKind == BBJ_THROW) { fprintf(fgxFile, ", shape = \"trapezium\""); } else if (block->bbFlags & BBF_INTERNAL) { fprintf(fgxFile, ", shape = \"note\""); } fprintf(fgxFile, "];\n"); } else { fprintf(fgxFile, "\n <block"); fprintf(fgxFile, "\n id=\"%d\"", block->bbNum); fprintf(fgxFile, "\n ordinal=\"%d\"", blockOrdinal); fprintf(fgxFile, "\n jumpKind=\"%s\"", kindImage[block->bbJumpKind]); if (block->hasTryIndex()) { fprintf(fgxFile, "\n inTry=\"%s\"", "true"); } if (block->hasHndIndex()) { fprintf(fgxFile, "\n inHandler=\"%s\"", "true"); } if ((fgFirstBB->hasProfileWeight()) && ((block->bbFlags & BBF_COLD) == 0)) { fprintf(fgxFile, "\n hot=\"true\""); } if (block->bbFlags & (BBF_HAS_NEWOBJ | BBF_HAS_NEWARRAY)) { fprintf(fgxFile, "\n callsNew=\"true\""); } if (block->bbFlags & BBF_LOOP_HEAD) { fprintf(fgxFile, "\n loopHead=\"true\""); } const char* rootTreeOpName = "n/a"; if (block->IsLIR() || (block->lastStmt() != nullptr)) { if (block->lastNode() != nullptr) { rootTreeOpName = GenTree::OpName(block->lastNode()->OperGet()); } } fprintf(fgxFile, "\n weight="); fprintfDouble(fgxFile, ((double)block->bbWeight) / weightDivisor); 
// fgGetCodeEstimate() will assert if the costs have not yet been initialized. // fprintf(fgxFile, "\n codeEstimate=\"%d\"", fgGetCodeEstimate(block)); fprintf(fgxFile, "\n startOffset=\"%d\"", block->bbCodeOffs); fprintf(fgxFile, "\n rootTreeOp=\"%s\"", rootTreeOpName); fprintf(fgxFile, "\n endOffset=\"%d\"", block->bbCodeOffsEnd); fprintf(fgxFile, ">"); fprintf(fgxFile, "\n </block>"); } } if (!createDotFile) { fprintf(fgxFile, "\n </blocks>"); fprintf(fgxFile, "\n <edges"); fprintf(fgxFile, "\n edgeCount=\"%d\"", fgEdgeCount); fprintf(fgxFile, ">"); } if (fgComputePredsDone) { unsigned edgeNum = 1; BasicBlock* bTarget; for (bTarget = fgFirstBB; bTarget != nullptr; bTarget = bTarget->bbNext) { double targetWeightDivisor; if (bTarget->bbWeight == BB_ZERO_WEIGHT) { targetWeightDivisor = 1.0; } else { targetWeightDivisor = (double)bTarget->bbWeight; } for (flowList* const edge : bTarget->PredEdges()) { BasicBlock* bSource = edge->getBlock(); double sourceWeightDivisor; if (bSource->bbWeight == BB_ZERO_WEIGHT) { sourceWeightDivisor = 1.0; } else { sourceWeightDivisor = (double)bSource->bbWeight; } if (createDotFile) { fprintf(fgxFile, " " FMT_BB " -> " FMT_BB, bSource->bbNum, bTarget->bbNum); const char* sep = ""; if (blkMap[bSource->bbNum] > blkMap[bTarget->bbNum]) { // Lexical backedge fprintf(fgxFile, " [color=green"); sep = ", "; } else if ((blkMap[bSource->bbNum] + 1) == blkMap[bTarget->bbNum]) { // Lexical successor fprintf(fgxFile, " [color=blue, weight=20"); sep = ", "; } else { fprintf(fgxFile, " ["); } if (validWeights) { weight_t edgeWeight = (edge->edgeWeightMin() + edge->edgeWeightMax()) / 2; fprintf(fgxFile, "%slabel=\"%7.2f\"", sep, (double)edgeWeight / weightDivisor); } fprintf(fgxFile, "];\n"); } else { fprintf(fgxFile, "\n <edge"); fprintf(fgxFile, "\n id=\"%d\"", edgeNum); fprintf(fgxFile, "\n source=\"%d\"", bSource->bbNum); fprintf(fgxFile, "\n target=\"%d\"", bTarget->bbNum); if (bSource->bbJumpKind == BBJ_SWITCH) { if (edge->flDupCount >= 2) { fprintf(fgxFile, "\n switchCases=\"%d\"", edge->flDupCount); } if (bSource->bbJumpSwt->getDefault() == bTarget) { fprintf(fgxFile, "\n switchDefault=\"true\""); } } if (validWeights) { weight_t edgeWeight = (edge->edgeWeightMin() + edge->edgeWeightMax()) / 2; fprintf(fgxFile, "\n weight="); fprintfDouble(fgxFile, ((double)edgeWeight) / weightDivisor); if (edge->edgeWeightMin() != edge->edgeWeightMax()) { fprintf(fgxFile, "\n minWeight="); fprintfDouble(fgxFile, ((double)edge->edgeWeightMin()) / weightDivisor); fprintf(fgxFile, "\n maxWeight="); fprintfDouble(fgxFile, ((double)edge->edgeWeightMax()) / weightDivisor); } if (edgeWeight > 0) { if (edgeWeight < bSource->bbWeight) { fprintf(fgxFile, "\n out="); fprintfDouble(fgxFile, ((double)edgeWeight) / sourceWeightDivisor); } if (edgeWeight < bTarget->bbWeight) { fprintf(fgxFile, "\n in="); fprintfDouble(fgxFile, ((double)edgeWeight) / targetWeightDivisor); } } } } if (!createDotFile) { fprintf(fgxFile, ">"); fprintf(fgxFile, "\n </edge>"); } ++edgeNum; } } } // For dot, show edges w/o pred lists, and add invisible bbNext links. // Also, add EH and/or loop regions as "cluster" subgraphs, if requested. // if (createDotFile) { for (BasicBlock* const bSource : Blocks()) { if (constrained) { // Invisible edge for bbNext chain // if (bSource->bbNext != nullptr) { fprintf(fgxFile, " " FMT_BB " -> " FMT_BB " [style=\"invis\", weight=25];\n", bSource->bbNum, bSource->bbNext->bbNum); } } if (fgComputePredsDone) { // Already emitted pred edges above. 
// continue; } // Emit successor edges // for (BasicBlock* const bTarget : bSource->Succs()) { fprintf(fgxFile, " " FMT_BB " -> " FMT_BB, bSource->bbNum, bTarget->bbNum); if (blkMap[bSource->bbNum] > blkMap[bTarget->bbNum]) { // Lexical backedge fprintf(fgxFile, " [color=green]\n"); } else if ((blkMap[bSource->bbNum] + 1) == blkMap[bTarget->bbNum]) { // Lexical successor fprintf(fgxFile, " [color=blue]\n"); } else { fprintf(fgxFile, ";\n"); } } } if ((includeEH && (compHndBBtabCount > 0)) || (includeLoops && (optLoopCount > 0))) { // Generate something like: // subgraph cluster_0 { // label = "xxx"; // color = yyy; // bb; bb; // subgraph { // label = "aaa"; // color = bbb; // bb; bb... // } // ... // } // // Thus, the subgraphs need to be nested to show the region nesting. // // The EH table is in order, top-to-bottom, most nested to least nested where // there is a parent/child relationship. The loop table the opposite: it is // in order from the least nested to most nested. // // Build a region tree, collecting all the regions we want to display, // and then walk it to emit the regions. // RegionGraph: represent non-overlapping, possibly nested, block ranges in the flow graph. class RegionGraph { public: enum class RegionType { Root, EH, Loop }; private: struct Region { Region(RegionType rgnType, const char* rgnName, BasicBlock* bbStart, BasicBlock* bbEnd) : m_rgnNext(nullptr) , m_rgnChild(nullptr) , m_rgnType(rgnType) , m_bbStart(bbStart) , m_bbEnd(bbEnd) { strcpy_s(m_rgnName, sizeof(m_rgnName), rgnName); } Region* m_rgnNext; Region* m_rgnChild; RegionType m_rgnType; char m_rgnName[30]; BasicBlock* m_bbStart; BasicBlock* m_bbEnd; }; public: RegionGraph(Compiler* comp, unsigned* blkMap, unsigned blkMapSize) : m_comp(comp), m_rgnRoot(nullptr), m_blkMap(blkMap), m_blkMapSize(blkMapSize) { // Create a root region that encompasses the whole function. m_rgnRoot = new (m_comp, CMK_DebugOnly) Region(RegionType::Root, "Root", comp->fgFirstBB, comp->fgLastBB); } //------------------------------------------------------------------------ // Insert: Insert a region [start..end] (inclusive) into the graph. // // Arguments: // name - the textual label to use for the region // rgnType - the region type // start - start block of the region // end - last block of the region // void Insert(const char* name, RegionType rgnType, BasicBlock* start, BasicBlock* end) { JITDUMP("Insert region: %s, type: %s, start: " FMT_BB ", end: " FMT_BB "\n", name, GetRegionType(rgnType), start->bbNum, end->bbNum); assert(start != nullptr); assert(end != nullptr); Region* newRgn = new (m_comp, CMK_DebugOnly) Region(rgnType, name, start, end); unsigned newStartOrdinal = m_blkMap[start->bbNum]; unsigned newEndOrdinal = m_blkMap[end->bbNum]; Region* curRgn = m_rgnRoot; unsigned curStartOrdinal = m_blkMap[curRgn->m_bbStart->bbNum]; unsigned curEndOrdinal = m_blkMap[curRgn->m_bbEnd->bbNum]; // A range can be a single block, but there can be no overlap between ranges. assert(newStartOrdinal <= newEndOrdinal); assert(curStartOrdinal <= curEndOrdinal); assert(newStartOrdinal >= curStartOrdinal); assert(newEndOrdinal <= curEndOrdinal); // We know the new region will be part of the current region. Should it be a direct // child, or put within one of the existing children? 
Region** lastChildPtr = &curRgn->m_rgnChild; Region* child = curRgn->m_rgnChild; while (child != nullptr) { unsigned childStartOrdinal = m_blkMap[child->m_bbStart->bbNum]; unsigned childEndOrdinal = m_blkMap[child->m_bbEnd->bbNum]; // Consider the following cases, where each "x" is a block in the range: // xxxxxxx // current 'child' range; we're comparing against this // xxxxxxx // (1) same range; could be considered child or parent // xxxxxxxxx // (2) parent range, shares last block // xxxxxxxxx // (3) parent range, shares first block // xxxxxxxxxxx // (4) fully overlapping parent range // xx // (5) non-overlapping preceding sibling range // xx // (6) non-overlapping following sibling range // xxx // (7) child range // xxx // (8) child range, shares same start block // x // (9) single-block child range, shares same start block // xxx // (10) child range, shares same end block // x // (11) single-block child range, shares same end block // xxxxxxx // illegal: overlapping ranges // xxx // illegal: overlapping ranges (shared child start block and new end block) // xxxxxxx // illegal: overlapping ranges // xxx // illegal: overlapping ranges (shared child end block and new start block) // Assert the child is properly nested within the parent. // Note that if regions have the same start and end, you can't tell which is nested within the // other, though it shouldn't matter. assert(childStartOrdinal <= childEndOrdinal); assert(curStartOrdinal <= childStartOrdinal); assert(childEndOrdinal <= curEndOrdinal); // Should the new region be before this child? // Case (5). if (newEndOrdinal < childStartOrdinal) { // Insert before this child. newRgn->m_rgnNext = child; *lastChildPtr = newRgn; break; } else if ((newStartOrdinal >= childStartOrdinal) && (newEndOrdinal <= childEndOrdinal)) { // Insert as a child of this child. // Need to recurse to walk the child's children list to see where it belongs. // Case (1), (7), (8), (9), (10), (11). curStartOrdinal = m_blkMap[child->m_bbStart->bbNum]; curEndOrdinal = m_blkMap[child->m_bbEnd->bbNum]; lastChildPtr = &child->m_rgnChild; child = child->m_rgnChild; continue; } else if (newStartOrdinal <= childStartOrdinal) { // The new region is a parent of one or more of the existing children. // Case (2), (3), (4). // Find all the children it encompasses. Region** lastEndChildPtr = &child->m_rgnNext; Region* endChild = child->m_rgnNext; while (endChild != nullptr) { unsigned endChildStartOrdinal = m_blkMap[endChild->m_bbStart->bbNum]; unsigned endChildEndOrdinal = m_blkMap[endChild->m_bbEnd->bbNum]; assert(endChildStartOrdinal <= endChildEndOrdinal); if (newEndOrdinal < endChildStartOrdinal) { // Found the range break; } lastEndChildPtr = &endChild->m_rgnNext; endChild = endChild->m_rgnNext; } // The range is [child..endChild previous]. If endChild is nullptr, then // the range is to the end of the parent. Move these all to be // children of newRgn, and put newRgn in where `child` is. newRgn->m_rgnNext = endChild; *lastChildPtr = newRgn; newRgn->m_rgnChild = child; *lastEndChildPtr = nullptr; break; } // Else, look for next child. // Case (6). lastChildPtr = &child->m_rgnNext; child = child->m_rgnNext; } if (child == nullptr) { // Insert as the last child (could be the only child). *lastChildPtr = newRgn; } } #ifdef DEBUG const unsigned dumpIndentIncrement = 2; // How much to indent each nested level. //------------------------------------------------------------------------ // GetRegionType: get a textual name for the region type, to be used in dumps. 
// // Arguments: // rgnType - the region type // static const char* GetRegionType(RegionType rgnType) { switch (rgnType) { case RegionType::Root: return "Root"; case RegionType::EH: return "EH"; case RegionType::Loop: return "Loop"; default: return "UNKNOWN"; } } //------------------------------------------------------------------------ // DumpRegionNode: Region graph dump helper to dump a region node at the given indent, // and recursive dump its children. // // Arguments: // rgn - the region to dump // indent - number of leading characters to indent all output // void DumpRegionNode(Region* rgn, unsigned indent) const { printf("%*s======\n", indent, ""); printf("%*sType: %s\n", indent, "", GetRegionType(rgn->m_rgnType)); printf("%*sName: %s\n", indent, "", rgn->m_rgnName); printf("%*sRange: " FMT_BB ".." FMT_BB "\n", indent, "", rgn->m_bbStart->bbNum, rgn->m_bbEnd->bbNum); for (Region* child = rgn->m_rgnChild; child != nullptr; child = child->m_rgnNext) { DumpRegionNode(child, indent + dumpIndentIncrement); } } //------------------------------------------------------------------------ // Dump: dump the entire region graph // void Dump() { printf("Region graph:\n"); DumpRegionNode(m_rgnRoot, 0); printf("\n"); } //------------------------------------------------------------------------ // VerifyNode: verify the region graph rooted at `rgn`. // // Arguments: // rgn - the node (and its children) to check. // void Verify(Region* rgn) { // The region needs to be a non-overlapping parent to all its children. // The children need to be non-overlapping, and in increasing order. unsigned rgnStartOrdinal = m_blkMap[rgn->m_bbStart->bbNum]; unsigned rgnEndOrdinal = m_blkMap[rgn->m_bbEnd->bbNum]; assert(rgnStartOrdinal <= rgnEndOrdinal); Region* child = rgn->m_rgnChild; Region* lastChild = nullptr; if (child != nullptr) { unsigned childStartOrdinal = m_blkMap[child->m_bbStart->bbNum]; unsigned childEndOrdinal = m_blkMap[child->m_bbEnd->bbNum]; assert(childStartOrdinal <= childEndOrdinal); assert(rgnStartOrdinal <= childStartOrdinal); while (true) { Verify(child); lastChild = child; unsigned lastChildStartOrdinal = childStartOrdinal; unsigned lastChildEndOrdinal = childEndOrdinal; child = child->m_rgnNext; if (child == nullptr) { break; } childStartOrdinal = m_blkMap[child->m_bbStart->bbNum]; childEndOrdinal = m_blkMap[child->m_bbEnd->bbNum]; assert(childStartOrdinal <= childEndOrdinal); // The children can't overlap; they can't share any blocks. assert(lastChildEndOrdinal < childStartOrdinal); } // The parent region must fully include the last child. assert(childEndOrdinal <= rgnEndOrdinal); } } //------------------------------------------------------------------------ // Verify: verify the region graph satisfies proper nesting, and other legality rules. // void Verify() { assert(m_comp != nullptr); assert(m_blkMap != nullptr); for (unsigned i = 0; i < m_blkMapSize; i++) { assert(m_blkMap[i] < m_blkMapSize); } // The root region has no siblings. assert(m_rgnRoot != nullptr); assert(m_rgnRoot->m_rgnNext == nullptr); Verify(m_rgnRoot); } #endif // DEBUG //------------------------------------------------------------------------ // Output: output the region graph to the .dot file // // Arguments: // file - the file to write output to. // void Output(FILE* file) { unsigned clusterNum = 0; // Output the regions; don't output the top (root) region that represents the whole function. 
for (Region* child = m_rgnRoot->m_rgnChild; child != nullptr; child = child->m_rgnNext) { OutputRegion(file, clusterNum, child, 4); } fprintf(file, "\n"); } private: //------------------------------------------------------------------------ // GetColorForRegion: get a color name to use for a region // // Arguments: // rgn - the region for which we need a color // static const char* GetColorForRegion(Region* rgn) { RegionType rgnType = rgn->m_rgnType; switch (rgnType) { case RegionType::EH: return "red"; case RegionType::Loop: return "blue"; default: return "black"; } } //------------------------------------------------------------------------ // OutputRegion: helper function to output a region and its nested children // to the .dot file. // // Arguments: // file - the file to write output to. // clusterNum - the number of this dot "cluster". This is updated as we // create new clusters. // rgn - the region to output. // indent - the current indent level, in characters. // void OutputRegion(FILE* file, unsigned& clusterNum, Region* rgn, unsigned indent) { fprintf(file, "%*ssubgraph cluster_%u {\n", indent, "", clusterNum); indent += 4; fprintf(file, "%*slabel = \"%s\";\n", indent, "", rgn->m_rgnName); fprintf(file, "%*scolor = %s;\n", indent, "", GetColorForRegion(rgn)); clusterNum++; bool needIndent = true; BasicBlock* bbCur = rgn->m_bbStart; BasicBlock* bbEnd = rgn->m_bbEnd->bbNext; Region* child = rgn->m_rgnChild; BasicBlock* childCurBB = (child == nullptr) ? nullptr : child->m_bbStart; // Count the children and assert we output all of them. unsigned totalChildren = 0; unsigned childCount = 0; for (Region* tmpChild = child; tmpChild != nullptr; tmpChild = tmpChild->m_rgnNext) { totalChildren++; } while (bbCur != bbEnd) { // Output from bbCur to current child first block. while ((bbCur != childCurBB) && (bbCur != bbEnd)) { fprintf(file, "%*s" FMT_BB ";", needIndent ? indent : 0, "", bbCur->bbNum); needIndent = false; bbCur = bbCur->bbNext; } if (bbCur == bbEnd) { // We're done at this level. break; } else { assert(bbCur != nullptr); // Or else we should also have `bbCur == bbEnd` assert(child != nullptr); // If there is a child, output that child. if (!needIndent) { // We've printed some basic blocks, so put the subgraph on a new line. fprintf(file, "\n"); } OutputRegion(file, clusterNum, child, indent); needIndent = true; childCount++; bbCur = child->m_bbEnd->bbNext; // Next, output blocks after this child. child = child->m_rgnNext; // Move to the next child, if any. childCurBB = (child == nullptr) ? nullptr : child->m_bbStart; } } // Put the end brace on its own line and leave the cursor at the beginning of the line for the // parent. indent -= 4; fprintf(file, "\n%*s}\n", indent, ""); assert(childCount == totalChildren); } Compiler* m_comp; Region* m_rgnRoot; unsigned* m_blkMap; unsigned m_blkMapSize; }; // Define the region graph object. We'll add regions to this, then output the graph. RegionGraph rgnGraph(this, blkMap, blkMapSize); // Add the EH regions to the region graph. An EH region consists of a region for the // `try`, a region for the handler, and, for filter/filter-handlers, a region for the // `filter` as well. 
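    // For example (illustrative only; the block numbers are hypothetical): a method with a
    // single try/catch whose `try` spans BB01..BB03 and whose handler spans BB04..BB05 would
    // add two regions below, named roughly "EH#0 try" and "EH#0 catch"; each one becomes a
    // nested cluster in the emitted graph.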
if (includeEH) { char name[30]; unsigned XTnum; EHblkDsc* ehDsc; for (XTnum = 0, ehDsc = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, ehDsc++) { sprintf_s(name, sizeof(name), "EH#%u try", XTnum); rgnGraph.Insert(name, RegionGraph::RegionType::EH, ehDsc->ebdTryBeg, ehDsc->ebdTryLast); const char* handlerType = ""; switch (ehDsc->ebdHandlerType) { case EH_HANDLER_CATCH: handlerType = "catch"; break; case EH_HANDLER_FILTER: handlerType = "filter-hnd"; break; case EH_HANDLER_FAULT: handlerType = "fault"; break; case EH_HANDLER_FINALLY: handlerType = "finally"; break; case EH_HANDLER_FAULT_WAS_FINALLY: handlerType = "fault-was-finally"; break; } sprintf_s(name, sizeof(name), "EH#%u %s", XTnum, handlerType); rgnGraph.Insert(name, RegionGraph::RegionType::EH, ehDsc->ebdHndBeg, ehDsc->ebdHndLast); if (ehDsc->HasFilter()) { sprintf_s(name, sizeof(name), "EH#%u filter", XTnum); rgnGraph.Insert(name, RegionGraph::RegionType::EH, ehDsc->ebdFilter, ehDsc->ebdHndBeg->bbPrev); } } } // Add regions for the loops. Note that loops are assumed to be contiguous from `lpTop` to `lpBottom`. if (includeLoops) { #ifdef DEBUG const bool displayLoopFlags = JitConfig.JitDumpFgLoopFlags() != 0; #else // !DEBUG const bool displayLoopFlags = false; #endif // !DEBUG char name[30]; for (unsigned loopNum = 0; loopNum < optLoopCount; loopNum++) { const LoopDsc& loop = optLoopTable[loopNum]; if (loop.lpFlags & LPFLG_REMOVED) { continue; } sprintf_s(name, sizeof(name), FMT_LP, loopNum); if (displayLoopFlags) { // Display a very few, useful, loop flags strcat_s(name, sizeof(name), " ["); if (loop.lpFlags & LoopFlags::LPFLG_ITER) { strcat_s(name, sizeof(name), "I"); } if (loop.lpFlags & LoopFlags::LPFLG_HAS_PREHEAD) { strcat_s(name, sizeof(name), "P"); } strcat_s(name, sizeof(name), "]"); } rgnGraph.Insert(name, RegionGraph::RegionType::Loop, loop.lpTop, loop.lpBottom); } } // All the regions have been added. Now, output them. 
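    // For example (illustrative only; names and block numbers are hypothetical), the
    // "EH#0 try" region from the sketch above would be emitted roughly as:
    //
    //     subgraph cluster_0 {
    //         label = "EH#0 try";
    //         color = red;
    //         BB01;BB02;BB03;
    //     }
    //
    // with any regions nested inside it (e.g., a loop) appearing as nested subgraph clusters.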
DBEXEC(verbose, rgnGraph.Dump()); INDEBUG(rgnGraph.Verify()); rgnGraph.Output(fgxFile); } } if (createDotFile) { fprintf(fgxFile, "}\n"); } else { fprintf(fgxFile, "\n </edges>"); fprintf(fgxFile, "\n</method>\n"); } if (dontClose) { // fgxFile is jitstdout or stderr fprintf(fgxFile, "\n"); } else { fclose(fgxFile); } return result; } #endif // DUMP_FLOWGRAPHS /*****************************************************************************/ #ifdef DEBUG void Compiler::fgDispReach() { printf("------------------------------------------------\n"); printf("BBnum Reachable by \n"); printf("------------------------------------------------\n"); for (BasicBlock* const block : Blocks()) { printf(FMT_BB " : ", block->bbNum); BlockSetOps::Iter iter(this, block->bbReach); unsigned bbNum = 0; while (iter.NextElem(&bbNum)) { printf(FMT_BB " ", bbNum); } printf("\n"); } } void Compiler::fgDispDoms() { // Don't bother printing this when we have a large number of BasicBlocks in the method if (fgBBcount > 256) { return; } printf("------------------------------------------------\n"); printf("BBnum Dominated by\n"); printf("------------------------------------------------\n"); for (unsigned i = 1; i <= fgBBNumMax; ++i) { BasicBlock* current = fgBBInvPostOrder[i]; printf(FMT_BB ": ", current->bbNum); while (current != current->bbIDom) { printf(FMT_BB " ", current->bbNum); current = current->bbIDom; } printf("\n"); } } /*****************************************************************************/ void Compiler::fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth /* = 0 */) { const unsigned __int64 flags = block->bbFlags; unsigned bbNumMax = impInlineRoot()->fgBBNumMax; int maxBlockNumWidth = CountDigits(bbNumMax); maxBlockNumWidth = max(maxBlockNumWidth, 2); int blockNumWidth = CountDigits(block->bbNum); blockNumWidth = max(blockNumWidth, 2); int blockNumPadding = maxBlockNumWidth - blockNumWidth; printf("%s %2u", block->dspToString(blockNumPadding), block->bbRefs); // // Display EH 'try' region index // if (block->hasTryIndex()) { printf(" %2u", block->getTryIndex()); } else { printf(" "); } // // Display EH handler region index // if (block->hasHndIndex()) { printf(" %2u", block->getHndIndex()); } else { printf(" "); } printf(" "); // // Display block predecessor list // unsigned charCnt; if (fgCheapPredsValid) { charCnt = block->dspCheapPreds(); } else { charCnt = block->dspPreds(); } if (charCnt < 19) { printf("%*s", 19 - charCnt, ""); } printf(" "); // // Display block weight // if (block->isMaxBBWeight()) { printf(" MAX "); } else { weight_t weight = block->getBBWeight(this); if (weight > 99999) // Is it going to be more than 6 characters? { if (weight <= 99999 * BB_UNITY_WEIGHT) { // print weight in this format ddddd. printf("%5u.", (unsigned)FloatingPointUtils::round(weight / BB_UNITY_WEIGHT)); } else // print weight in terms of k (i.e. 156k ) { // print weight in this format dddddk weight_t weightK = weight / 1000; printf("%5uk", (unsigned)FloatingPointUtils::round(weightK / BB_UNITY_WEIGHT)); } } else // print weight in this format ddd.dd { printf("%6s", refCntWtd2str(weight)); } } // // Display optional IBC weight column. // Note that iColWidth includes one character for a leading space, if there is an IBC column. // if (ibcColWidth > 0) { if (block->hasProfileWeight()) { printf("%*u", ibcColWidth, (unsigned)FloatingPointUtils::round(block->bbWeight)); } else { // No IBC data. Just print spaces to align the column. 
printf("%*s", ibcColWidth, ""); } } printf(" "); // // Display natural loop number // if (block->bbNatLoopNum == BasicBlock::NOT_IN_LOOP) { printf(" "); } else { printf("%2d ", block->bbNatLoopNum); } // // Display block IL range // block->dspBlockILRange(); // // Display block branch target // if (flags & BBF_REMOVED) { printf("[removed] "); } else { switch (block->bbJumpKind) { case BBJ_COND: printf("-> " FMT_BB "%*s ( cond )", block->bbJumpDest->bbNum, maxBlockNumWidth - max(CountDigits(block->bbJumpDest->bbNum), 2), ""); break; case BBJ_CALLFINALLY: printf("-> " FMT_BB "%*s (callf )", block->bbJumpDest->bbNum, maxBlockNumWidth - max(CountDigits(block->bbJumpDest->bbNum), 2), ""); break; case BBJ_ALWAYS: if (flags & BBF_KEEP_BBJ_ALWAYS) { printf("-> " FMT_BB "%*s (ALWAYS)", block->bbJumpDest->bbNum, maxBlockNumWidth - max(CountDigits(block->bbJumpDest->bbNum), 2), ""); } else { printf("-> " FMT_BB "%*s (always)", block->bbJumpDest->bbNum, maxBlockNumWidth - max(CountDigits(block->bbJumpDest->bbNum), 2), ""); } break; case BBJ_LEAVE: printf("-> " FMT_BB "%*s (leave )", block->bbJumpDest->bbNum, maxBlockNumWidth - max(CountDigits(block->bbJumpDest->bbNum), 2), ""); break; case BBJ_EHFINALLYRET: printf("%*s (finret)", maxBlockNumWidth - 2, ""); break; case BBJ_EHFILTERRET: printf("%*s (fltret)", maxBlockNumWidth - 2, ""); break; case BBJ_EHCATCHRET: printf("-> " FMT_BB "%*s ( cret )", block->bbJumpDest->bbNum, maxBlockNumWidth - max(CountDigits(block->bbJumpDest->bbNum), 2), ""); break; case BBJ_THROW: printf("%*s (throw )", maxBlockNumWidth - 2, ""); break; case BBJ_RETURN: printf("%*s (return)", maxBlockNumWidth - 2, ""); break; default: printf("%*s ", maxBlockNumWidth - 2, ""); break; case BBJ_SWITCH: { printf("->"); const BBswtDesc* const bbJumpSwt = block->bbJumpSwt; const unsigned jumpCnt = bbJumpSwt->bbsCount; BasicBlock** const jumpTab = bbJumpSwt->bbsDstTab; int switchWidth = 0; for (unsigned i = 0; i < jumpCnt; i++) { printf("%c" FMT_BB, (i == 0) ? 
' ' : ',', jumpTab[i]->bbNum); switchWidth += 1 /* space/comma */ + 2 /* BB */ + max(CountDigits(jumpTab[i]->bbNum), 2); const bool isDefault = bbJumpSwt->bbsHasDefault && (i == jumpCnt - 1); if (isDefault) { printf("[def]"); switchWidth += 5; } const bool isDominant = bbJumpSwt->bbsHasDominantCase && (i == bbJumpSwt->bbsDominantCase); if (isDominant) { printf("[dom(" FMT_WT ")]", bbJumpSwt->bbsDominantFraction); switchWidth += 10; } } if (switchWidth < 7) { printf("%*s", 8 - switchWidth, ""); } printf(" (switch)"); } break; } } printf(" "); // // Display block EH region and type, including nesting indicator // if (block->hasTryIndex()) { printf("T%d ", block->getTryIndex()); } else { printf(" "); } if (block->hasHndIndex()) { printf("H%d ", block->getHndIndex()); } else { printf(" "); } if (flags & BBF_FUNCLET_BEG) { printf("F "); } else { printf(" "); } int cnt = 0; switch (block->bbCatchTyp) { case BBCT_NONE: break; case BBCT_FAULT: printf("fault "); cnt += 6; break; case BBCT_FINALLY: printf("finally "); cnt += 8; break; case BBCT_FILTER: printf("filter "); cnt += 7; break; case BBCT_FILTER_HANDLER: printf("filtHnd "); cnt += 8; break; default: printf("catch "); cnt += 6; break; } if (block->bbCatchTyp != BBCT_NONE) { cnt += 2; printf("{ "); /* brace matching editor workaround to compensate for the preceding line: } */ } if (flags & BBF_TRY_BEG) { // Output a brace for every try region that this block opens for (EHblkDsc* const HBtab : EHClauses(this)) { if (HBtab->ebdTryBeg == block) { cnt += 6; printf("try { "); /* brace matching editor workaround to compensate for the preceding line: } */ } } } for (EHblkDsc* const HBtab : EHClauses(this)) { if (HBtab->ebdTryLast == block) { cnt += 2; /* brace matching editor workaround to compensate for the following line: { */ printf("} "); } if (HBtab->ebdHndLast == block) { cnt += 2; /* brace matching editor workaround to compensate for the following line: { */ printf("} "); } if (HBtab->HasFilter() && block->bbNext == HBtab->ebdHndBeg) { cnt += 2; /* brace matching editor workaround to compensate for the following line: { */ printf("} "); } } while (cnt < 12) { cnt++; printf(" "); } // // Display block flags // block->dspFlags(); // Display OSR info // if (opts.IsOSR()) { if (block == fgEntryBB) { printf(" original-entry"); } if (block == fgOSREntryBB) { printf(" osr-entry"); } } printf("\n"); } /**************************************************************************** Dump blocks from firstBlock to lastBlock. */ void Compiler::fgDispBasicBlocks(BasicBlock* firstBlock, BasicBlock* lastBlock, bool dumpTrees) { BasicBlock* block; // If any block has IBC data, we add an "IBC weight" column just before the 'IL range' column. This column is as // wide as necessary to accommodate all the various IBC weights. It's at least 4 characters wide, to accommodate // the "IBC" title and leading space. int ibcColWidth = 0; for (block = firstBlock; block != nullptr; block = block->bbNext) { if (block->hasProfileWeight()) { int thisIbcWidth = CountDigits(block->bbWeight); ibcColWidth = max(ibcColWidth, thisIbcWidth); } if (block == lastBlock) { break; } } if (ibcColWidth > 0) { ibcColWidth = max(ibcColWidth, 3) + 1; // + 1 for the leading space } unsigned bbNumMax = impInlineRoot()->fgBBNumMax; int maxBlockNumWidth = CountDigits(bbNumMax); maxBlockNumWidth = max(maxBlockNumWidth, 2); int padWidth = maxBlockNumWidth - 2; // Account for functions with a large number of blocks. 
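    // To give a feel for the layout (illustrative only; exact spacing depends on the column
    // widths computed above, and the values shown are hypothetical), a typical row printed
    // by fgTableDispBasicBlock looks roughly like:
    //
    //     BB03 [0002]  2  BB01,BB02            1      [009..01D)-> BB05 ( cond )    i label
    //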
// clang-format off printf("\n"); printf("------%*s-------------------------------------%*s--------------------------%*s----------------------------------------\n", padWidth, "------------", ibcColWidth, "------------", maxBlockNumWidth, "----"); printf("BBnum %*sBBid ref try hnd %s weight %*s%s lp [IL range] [jump]%*s [EH region] [flags]\n", padWidth, "", fgCheapPredsValid ? "cheap preds" : (fgComputePredsDone ? "preds " : " "), ((ibcColWidth > 0) ? ibcColWidth - 3 : 0), "", // Subtract 3 for the width of "IBC", printed next. ((ibcColWidth > 0) ? "IBC" : ""), maxBlockNumWidth, "" ); printf("------%*s-------------------------------------%*s--------------------------%*s----------------------------------------\n", padWidth, "------------", ibcColWidth, "------------", maxBlockNumWidth, "----"); // clang-format on for (block = firstBlock; block; block = block->bbNext) { // First, do some checking on the bbPrev links if (block->bbPrev) { if (block->bbPrev->bbNext != block) { printf("bad prev link\n"); } } else if (block != fgFirstBB) { printf("bad prev link!\n"); } if (block == fgFirstColdBlock) { printf( "~~~~~~%*s~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%*s~~~~~~~~~~~~~~~~~~~~~~~~~~%*s~~~~~~~~~~~~~~~~~~~~~~~~" "~~~~~~~~~~~~~~~~\n", padWidth, "~~~~~~~~~~~~", ibcColWidth, "~~~~~~~~~~~~", maxBlockNumWidth, "~~~~"); } #if defined(FEATURE_EH_FUNCLETS) if (block == fgFirstFuncletBB) { printf( "++++++%*s+++++++++++++++++++++++++++++++++++++%*s++++++++++++++++++++++++++%*s++++++++++++++++++++++++" "++++++++++++++++ funclets follow\n", padWidth, "++++++++++++", ibcColWidth, "++++++++++++", maxBlockNumWidth, "++++"); } #endif // FEATURE_EH_FUNCLETS fgTableDispBasicBlock(block, ibcColWidth); if (block == lastBlock) { break; } } printf( "------%*s-------------------------------------%*s--------------------------%*s--------------------------------" "--------\n", padWidth, "------------", ibcColWidth, "------------", maxBlockNumWidth, "----"); if (dumpTrees) { fgDumpTrees(firstBlock, lastBlock); } } /*****************************************************************************/ void Compiler::fgDispBasicBlocks(bool dumpTrees) { fgDispBasicBlocks(fgFirstBB, nullptr, dumpTrees); } //------------------------------------------------------------------------ // fgDumpStmtTree: dump the statement and the basic block number. // // Arguments: // stmt - the statement to dump; // bbNum - the basic block number to dump. // void Compiler::fgDumpStmtTree(Statement* stmt, unsigned bbNum) { printf("\n***** " FMT_BB "\n", bbNum); gtDispStmt(stmt); } //------------------------------------------------------------------------ // Compiler::fgDumpBlock: dumps the contents of the given block to stdout. // // Arguments: // block - The block to dump. // void Compiler::fgDumpBlock(BasicBlock* block) { printf("\n------------ "); block->dspBlockHeader(this); if (!block->IsLIR()) { for (Statement* const stmt : block->Statements()) { fgDumpStmtTree(stmt, block->bbNum); } } else { gtDispRange(LIR::AsRange(block)); } } //------------------------------------------------------------------------ // fgDumpTrees: dumps the trees for every block in a range of blocks. // // Arguments: // firstBlock - The first block to dump. // lastBlock - The last block to dump. // void Compiler::fgDumpTrees(BasicBlock* firstBlock, BasicBlock* lastBlock) { // Note that typically we have already called fgDispBasicBlocks() // so we don't need to print the preds and succs again here. 
for (BasicBlock* block = firstBlock; block != nullptr; block = block->bbNext) { fgDumpBlock(block); if (block == lastBlock) { break; } } printf("\n---------------------------------------------------------------------------------------------------------" "----------\n"); } /***************************************************************************** * Try to create as many candidates for GTF_MUL_64RSLT as possible. * We convert 'intOp1*intOp2' into 'int(long(nop(intOp1))*long(intOp2))'. */ /* static */ Compiler::fgWalkResult Compiler::fgStress64RsltMulCB(GenTree** pTree, fgWalkData* data) { GenTree* tree = *pTree; Compiler* pComp = data->compiler; if (tree->gtOper != GT_MUL || tree->gtType != TYP_INT || (tree->gtOverflow())) { return WALK_CONTINUE; } JITDUMP("STRESS_64RSLT_MUL before:\n"); DISPTREE(tree); // To ensure optNarrowTree() doesn't fold back to the original tree. tree->AsOp()->gtOp1 = pComp->gtNewCastNode(TYP_LONG, tree->AsOp()->gtOp1, false, TYP_LONG); tree->AsOp()->gtOp1 = pComp->gtNewOperNode(GT_NOP, TYP_LONG, tree->AsOp()->gtOp1); tree->AsOp()->gtOp1 = pComp->gtNewCastNode(TYP_LONG, tree->AsOp()->gtOp1, false, TYP_LONG); tree->AsOp()->gtOp2 = pComp->gtNewCastNode(TYP_LONG, tree->AsOp()->gtOp2, false, TYP_LONG); tree->gtType = TYP_LONG; *pTree = pComp->gtNewCastNode(TYP_INT, tree, false, TYP_INT); JITDUMP("STRESS_64RSLT_MUL after:\n"); DISPTREE(*pTree); return WALK_SKIP_SUBTREES; } void Compiler::fgStress64RsltMul() { if (!compStressCompile(STRESS_64RSLT_MUL, 20)) { return; } fgWalkAllTreesPre(fgStress64RsltMulCB, (void*)this); } // BBPredsChecker checks jumps from the block's predecessors to the block. class BBPredsChecker { public: BBPredsChecker(Compiler* compiler) : comp(compiler) { } unsigned CheckBBPreds(BasicBlock* block, unsigned curTraversalStamp); private: bool CheckEhTryDsc(BasicBlock* block, BasicBlock* blockPred, EHblkDsc* ehTryDsc); bool CheckEhHndDsc(BasicBlock* block, BasicBlock* blockPred, EHblkDsc* ehHndlDsc); bool CheckJump(BasicBlock* blockPred, BasicBlock* block); bool CheckEHFinallyRet(BasicBlock* blockPred, BasicBlock* block); private: Compiler* comp; }; //------------------------------------------------------------------------ // CheckBBPreds: Check basic block predecessors list. // // Notes: // This DEBUG routine checks that all predecessors have the correct traversal stamp // and have correct jumps to the block. // It calculates the number of incoming edges from the internal block, // i.e. it does not count the global incoming edge for the first block. // // Arguments: // block - the block to process; // curTraversalStamp - current traversal stamp to distinguish different iterations. // // Return value: // the number of incoming edges for the block. unsigned BBPredsChecker::CheckBBPreds(BasicBlock* block, unsigned curTraversalStamp) { if (comp->fgCheapPredsValid) { return 0; } if (!comp->fgComputePredsDone) { assert(block->bbPreds == nullptr); return 0; } unsigned blockRefs = 0; for (flowList* const pred : block->PredEdges()) { blockRefs += pred->flDupCount; BasicBlock* blockPred = pred->getBlock(); // Make sure this pred is part of the BB list. 
assert(blockPred->bbTraversalStamp == curTraversalStamp); EHblkDsc* ehTryDsc = comp->ehGetBlockTryDsc(block); if (ehTryDsc != nullptr) { assert(CheckEhTryDsc(block, blockPred, ehTryDsc)); } EHblkDsc* ehHndDsc = comp->ehGetBlockHndDsc(block); if (ehHndDsc != nullptr) { assert(CheckEhHndDsc(block, blockPred, ehHndDsc)); } assert(CheckJump(blockPred, block)); } // Make sure preds are in increasing BBnum order // assert(block->checkPredListOrder()); return blockRefs; } bool BBPredsChecker::CheckEhTryDsc(BasicBlock* block, BasicBlock* blockPred, EHblkDsc* ehTryDsc) { // You can jump to the start of a try if (ehTryDsc->ebdTryBeg == block) { return true; } // You can jump within the same try region if (comp->bbInTryRegions(block->getTryIndex(), blockPred)) { return true; } // The catch block can jump back into the middle of the try if (comp->bbInCatchHandlerRegions(block, blockPred)) { return true; } // The end of a finally region is a BBJ_EHFINALLYRET block (during importing, BBJ_LEAVE) which // is marked as "returning" to the BBJ_ALWAYS block following the BBJ_CALLFINALLY // block that does a local call to the finally. This BBJ_ALWAYS is within // the try region protected by the finally (for x86, ARM), but that's ok. BasicBlock* prevBlock = block->bbPrev; if (prevBlock->bbJumpKind == BBJ_CALLFINALLY && block->bbJumpKind == BBJ_ALWAYS && blockPred->bbJumpKind == BBJ_EHFINALLYRET) { return true; } printf("Jump into the middle of try region: " FMT_BB " branches to " FMT_BB "\n", blockPred->bbNum, block->bbNum); assert(!"Jump into middle of try region"); return false; } bool BBPredsChecker::CheckEhHndDsc(BasicBlock* block, BasicBlock* blockPred, EHblkDsc* ehHndlDsc) { // You can do a BBJ_EHFINALLYRET or BBJ_EHFILTERRET into a handler region if (blockPred->KindIs(BBJ_EHFINALLYRET, BBJ_EHFILTERRET)) { return true; } // Our try block can call our finally block if ((block->bbCatchTyp == BBCT_FINALLY) && (blockPred->bbJumpKind == BBJ_CALLFINALLY) && comp->ehCallFinallyInCorrectRegion(blockPred, block->getHndIndex())) { return true; } // You can jump within the same handler region if (comp->bbInHandlerRegions(block->getHndIndex(), blockPred)) { return true; } // A filter can jump to the start of the filter handler if (ehHndlDsc->HasFilter()) { return true; } printf("Jump into the middle of handler region: " FMT_BB " branches to " FMT_BB "\n", blockPred->bbNum, block->bbNum); assert(!"Jump into the middle of handler region"); return false; } bool BBPredsChecker::CheckJump(BasicBlock* blockPred, BasicBlock* block) { switch (blockPred->bbJumpKind) { case BBJ_COND: assert(blockPred->bbNext == block || blockPred->bbJumpDest == block); return true; case BBJ_NONE: assert(blockPred->bbNext == block); return true; case BBJ_CALLFINALLY: case BBJ_ALWAYS: case BBJ_EHCATCHRET: case BBJ_EHFILTERRET: assert(blockPred->bbJumpDest == block); return true; case BBJ_EHFINALLYRET: assert(CheckEHFinallyRet(blockPred, block)); return true; case BBJ_THROW: case BBJ_RETURN: assert(!"THROW and RETURN block cannot be in the predecessor list!"); break; case BBJ_SWITCH: for (BasicBlock* const bTarget : blockPred->SwitchTargets()) { if (block == bTarget) { return true; } } assert(!"SWITCH in the predecessor list with no jump label to BLOCK!"); break; default: assert(!"Unexpected bbJumpKind"); break; } return false; } bool BBPredsChecker::CheckEHFinallyRet(BasicBlock* blockPred, BasicBlock* block) { // If the current block is a successor to a BBJ_EHFINALLYRET (return from finally), // then the lexically previous block should be a call 
to the same finally. // Verify all of that. unsigned hndIndex = blockPred->getHndIndex(); EHblkDsc* ehDsc = comp->ehGetDsc(hndIndex); BasicBlock* finBeg = ehDsc->ebdHndBeg; // Because there is no bbPrev, we have to search for the lexically previous // block. We can shorten the search by only looking in places where it is legal // to have a call to the finally. BasicBlock* begBlk; BasicBlock* endBlk; comp->ehGetCallFinallyBlockRange(hndIndex, &begBlk, &endBlk); for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) { continue; } if (block == bcall->bbNext) { return true; } } #if defined(FEATURE_EH_FUNCLETS) if (comp->fgFuncletsCreated) { // There is no easy way to search just the funclets that were pulled out of // the corresponding try body, so instead we search all the funclets, and if // we find a potential 'hit' we check if the funclet we're looking at is // from the correct try region. for (BasicBlock* const bcall : comp->Blocks(comp->fgFirstFuncletBB)) { if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) { continue; } if (block != bcall->bbNext) { continue; } if (comp->ehCallFinallyInCorrectRegion(bcall, hndIndex)) { return true; } } } #endif // FEATURE_EH_FUNCLETS assert(!"BBJ_EHFINALLYRET predecessor of block that doesn't follow a BBJ_CALLFINALLY!"); return false; } //------------------------------------------------------------------------------ // fgDebugCheckBBNumIncreasing: Check that the block list bbNum are in increasing order in the bbNext // traversal. Given a block B1 and its bbNext successor B2, this means `B1->bbNum < B2->bbNum`, but not // that `B1->bbNum + 1 == B2->bbNum` (which is true after renumbering). This can be used as a precondition // to a phase that expects this ordering to compare block numbers (say, to look for backwards branches) // and doesn't want to call fgRenumberBlocks(), to avoid that potential expense. // void Compiler::fgDebugCheckBBNumIncreasing() { for (BasicBlock* const block : Blocks()) { assert(block->bbNext == nullptr || (block->bbNum < block->bbNext->bbNum)); } } // This variable is used to generate "traversal labels": one-time constants with which // we label basic blocks that are members of the basic block list, in order to have a // fast, high-probability test for membership in that list. Type is "volatile" because // it's incremented with an atomic operation, which wants a volatile type; "long" so that // wrap-around to 0 (which I think has the highest probability of accidental collision) is // postponed a *long* time. static volatile int bbTraverseLabel = 1; /***************************************************************************** * * A DEBUG routine to check the consistency of the flowgraph, * i.e. bbNum, bbRefs, bbPreds have to be up to date. * *****************************************************************************/ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRefs /* = true */) { #ifdef DEBUG if (verbose) { printf("*************** In fgDebugCheckBBlist\n"); } #endif // DEBUG fgDebugCheckBlockLinks(); fgFirstBBisScratch(); if (fgBBcount > 10000 && expensiveDebugCheckLevel < 1) { // The basic block checks are too expensive if there are too many blocks, // so give up unless we've been told to try hard. return; } #if defined(FEATURE_EH_FUNCLETS) bool reachedFirstFunclet = false; if (fgFuncletsCreated) { // // Make sure that fgFirstFuncletBB is accurate. 
// It should be the first basic block in a handler region. // if (fgFirstFuncletBB != nullptr) { assert(fgFirstFuncletBB->hasHndIndex() == true); assert(fgFirstFuncletBB->bbFlags & BBF_FUNCLET_BEG); } } #endif // FEATURE_EH_FUNCLETS /* Check bbNum, bbRefs and bbPreds */ // First, pick a traversal stamp, and label all the blocks with it. unsigned curTraversalStamp = unsigned(InterlockedIncrement((LONG*)&bbTraverseLabel)); for (BasicBlock* const block : Blocks()) { block->bbTraversalStamp = curTraversalStamp; } for (BasicBlock* const block : Blocks()) { if (checkBBNum) { // Check that bbNum is sequential assert(block->bbNext == nullptr || (block->bbNum + 1 == block->bbNext->bbNum)); } // If the block is a BBJ_COND, a BBJ_SWITCH or a // lowered GT_SWITCH_TABLE node then make sure it // ends with a conditional jump or a GT_SWITCH if (block->bbJumpKind == BBJ_COND) { assert(block->lastNode()->gtNext == nullptr && block->lastNode()->OperIsConditionalJump()); } else if (block->bbJumpKind == BBJ_SWITCH) { assert(block->lastNode()->gtNext == nullptr && (block->lastNode()->gtOper == GT_SWITCH || block->lastNode()->gtOper == GT_SWITCH_TABLE)); } if (block->bbCatchTyp == BBCT_FILTER) { if (!fgCheapPredsValid) // Don't check cheap preds { // A filter has no predecessors assert(block->bbPreds == nullptr); } } #if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { // // There should be no handler blocks until // we get to the fgFirstFuncletBB block, // then every block should be a handler block // if (!reachedFirstFunclet) { if (block == fgFirstFuncletBB) { assert(block->hasHndIndex() == true); reachedFirstFunclet = true; } else { assert(block->hasHndIndex() == false); } } else // reachedFirstFunclet { assert(block->hasHndIndex() == true); } } #endif // FEATURE_EH_FUNCLETS if (checkBBRefs) { assert(fgComputePredsDone); } BBPredsChecker checker(this); unsigned blockRefs = checker.CheckBBPreds(block, curTraversalStamp); // First basic block has an additional global incoming edge. if (block == fgFirstBB) { blockRefs += 1; } // Under OSR, if we also are keeping the original method entry around, // mark that as implicitly referenced as well. if (opts.IsOSR() && (block == fgEntryBB)) { blockRefs += 1; } /* Check the bbRefs */ if (checkBBRefs) { if (block->bbRefs != blockRefs) { // Check to see if this block is the beginning of a filter or a handler and adjust the ref count // appropriately. for (EHblkDsc* const HBtab : EHClauses(this)) { if (HBtab->ebdHndBeg == block) { blockRefs++; } if (HBtab->HasFilter() && (HBtab->ebdFilter == block)) { blockRefs++; } } } assert(block->bbRefs == blockRefs); } /* Check that BBF_HAS_HANDLER is valid bbTryIndex */ if (block->hasTryIndex()) { assert(block->getTryIndex() < compHndBBtabCount); } // A branch or fall-through to a BBJ_CALLFINALLY block must come from the `try` region associated // with the finally block the BBJ_CALLFINALLY is targeting. There is one special case: if the // BBJ_CALLFINALLY is the first block of a `try`, then its predecessor can be outside the `try`: // either a branch or fall-through to the first block. // // Note that this IR condition is a choice. It naturally occurs when importing EH constructs. // This condition prevents flow optimizations from skipping blocks in a `try` and branching // directly to the BBJ_CALLFINALLY. Relaxing this constraint would require careful thinking about // the implications, such as data flow optimizations. // // Don't depend on predecessors list for the check. 
for (BasicBlock* const succBlock : block->Succs()) { if (succBlock->bbJumpKind == BBJ_CALLFINALLY) { BasicBlock* finallyBlock = succBlock->bbJumpDest; assert(finallyBlock->hasHndIndex()); unsigned finallyIndex = finallyBlock->getHndIndex(); // Now make sure the block branching to the BBJ_CALLFINALLY is in the correct region. The branch // to the BBJ_CALLFINALLY can come from the try region of the finally block, or from a more nested // try region, e.g.: // try { // try { // LEAVE L_OUTER; // this becomes a branch to a BBJ_CALLFINALLY in an outer try region // // (in the FEATURE_EH_CALLFINALLY_THUNKS case) // } catch { // } // } finally { // } // L_OUTER: // EHblkDsc* ehDsc = ehGetDsc(finallyIndex); if (ehDsc->ebdTryBeg == succBlock) { // The BBJ_CALLFINALLY is the first block of it's `try` region. Don't check the predecessor. // Note that this case won't occur in the FEATURE_EH_CALLFINALLY_THUNKS case, since the // BBJ_CALLFINALLY in that case won't exist in the `try` region of the `finallyIndex`. } else { assert(bbInTryRegions(finallyIndex, block)); } } } /* Check if BBF_RUN_RARELY is set that we have bbWeight of zero */ if (block->isRunRarely()) { assert(block->bbWeight == BB_ZERO_WEIGHT); } else { assert(block->bbWeight > BB_ZERO_WEIGHT); } } // Make sure the one return BB is not changed. if (genReturnBB != nullptr) { assert(genReturnBB->GetFirstLIRNode() != nullptr || genReturnBB->bbStmtList != nullptr); } // The general encoder/decoder (currently) only reports "this" as a generics context as a stack location, // so we mark info.compThisArg as lvAddrTaken to ensure that it is not enregistered. Otherwise, it should // not be address-taken. This variable determines if the address-taken-ness of "thisArg" is "OK". bool copiedForGenericsCtxt; #ifndef JIT32_GCENCODER copiedForGenericsCtxt = ((info.compMethodInfo->options & CORINFO_GENERICS_CTXT_FROM_THIS) != 0); #else // JIT32_GCENCODER copiedForGenericsCtxt = false; #endif // JIT32_GCENCODER // This if only in support of the noway_asserts it contains. if (info.compIsStatic) { // For static method, should have never grabbed the temp. assert(lvaArg0Var == BAD_VAR_NUM); } else { // For instance method: assert(info.compThisArg != BAD_VAR_NUM); bool compThisArgAddrExposedOK = !lvaTable[info.compThisArg].IsAddressExposed(); #ifndef JIT32_GCENCODER compThisArgAddrExposedOK = compThisArgAddrExposedOK || copiedForGenericsCtxt; #endif // !JIT32_GCENCODER // Should never expose the address of arg 0 or write to arg 0. // In addition, lvArg0Var should remain 0 if arg0 is not // written to or address-exposed. assert(compThisArgAddrExposedOK && !lvaTable[info.compThisArg].lvHasILStoreOp && (lvaArg0Var == info.compThisArg || (lvaArg0Var != info.compThisArg && (lvaTable[lvaArg0Var].IsAddressExposed() || lvaTable[lvaArg0Var].lvHasILStoreOp || copiedForGenericsCtxt)))); } } //------------------------------------------------------------------------ // fgDebugCheckFlags: Validate various invariants related to the propagation // and setting of tree flags ("gtFlags"). // // Arguments: // tree - the tree to (recursively) check the flags for // void Compiler::fgDebugCheckFlags(GenTree* tree) { GenTreeFlags actualFlags = tree->gtFlags & GTF_ALL_EFFECT; GenTreeFlags expectedFlags = GTF_EMPTY; if (tree->OperMayThrow(this)) { expectedFlags |= GTF_EXCEPT; } if (tree->OperRequiresAsgFlag()) { expectedFlags |= GTF_ASG; } if (tree->OperRequiresCallFlag(this)) { expectedFlags |= GTF_CALL; } // We reuse GTF_REVERSE_OPS as GTF_VAR_ARR_INDEX for LCL_VAR nodes. 
if (((tree->gtFlags & GTF_REVERSE_OPS) != 0) && !tree->OperIs(GT_LCL_VAR)) { assert(tree->OperSupportsReverseOpEvalOrder(this)); } GenTree* op1 = tree->OperIsSimple() ? tree->gtGetOp1() : nullptr; switch (tree->OperGet()) { case GT_CLS_VAR: expectedFlags |= GTF_GLOB_REF; break; case GT_CATCH_ARG: expectedFlags |= GTF_ORDER_SIDEEFF; break; case GT_MEMORYBARRIER: expectedFlags |= (GTF_GLOB_REF | GTF_ASG); break; case GT_LCL_VAR: assert((tree->gtFlags & GTF_VAR_FOLDED_IND) == 0); break; case GT_QMARK: assert(!op1->CanCSE()); assert(op1->OperIsCompare() || op1->IsIntegralConst(0) || op1->IsIntegralConst(1)); break; case GT_ASG: case GT_ADDR: // Note that this is a weak check - the "op1" location node can be a COMMA. assert(!op1->CanCSE()); break; case GT_IND: // Do we have a constant integer address as op1 that is also a handle? if (op1->IsCnsIntOrI() && op1->IsIconHandle()) { if ((tree->gtFlags & GTF_IND_INVARIANT) != 0) { actualFlags |= GTF_IND_INVARIANT; } if ((tree->gtFlags & GTF_IND_NONFAULTING) != 0) { actualFlags |= GTF_IND_NONFAULTING; } GenTreeFlags handleKind = op1->GetIconHandleFlag(); // Some of these aren't handles to invariant data... if ((handleKind == GTF_ICON_STATIC_HDL) || // Pointer to a mutable class Static variable (handleKind == GTF_ICON_BBC_PTR) || // Pointer to a mutable basic block count value (handleKind == GTF_ICON_GLOBAL_PTR)) // Pointer to mutable data from the VM state { // For statics, we expect the GTF_GLOB_REF to be set. However, we currently // fail to set it in a number of situations, and so this check is disabled. // TODO: enable checking of GTF_GLOB_REF. // expectedFlags |= GTF_GLOB_REF; } else // All the other handle indirections are considered invariant { expectedFlags |= GTF_IND_INVARIANT; } // Currently we expect all indirections with constant addresses to be nonfaulting. expectedFlags |= GTF_IND_NONFAULTING; } break; case GT_CALL: GenTreeCall* call; call = tree->AsCall(); if ((call->gtCallThisArg != nullptr) && ((call->gtCallThisArg->GetNode()->gtFlags & GTF_ASG) != 0)) { // TODO-Cleanup: this is a patch for a violation in our GT_ASG propagation. // see https://github.com/dotnet/runtime/issues/13758 actualFlags |= GTF_ASG; } for (GenTreeCall::Use& use : call->Args()) { if ((use.GetNode()->gtFlags & GTF_ASG) != 0) { // TODO-Cleanup: this is a patch for a violation in our GT_ASG propagation. // see https://github.com/dotnet/runtime/issues/13758 actualFlags |= GTF_ASG; } } for (GenTreeCall::Use& use : call->LateArgs()) { if ((use.GetNode()->gtFlags & GTF_ASG) != 0) { // TODO-Cleanup: this is a patch for a violation in our GT_ASG propagation. // see https://github.com/dotnet/runtime/issues/13758 actualFlags |= GTF_ASG; } } if (call->IsUnmanaged() && ((call->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL) != 0)) { if (call->gtCallArgs->GetNode()->OperGet() == GT_NOP) { assert(call->gtCallLateArgs->GetNode()->TypeIs(TYP_I_IMPL, TYP_BYREF)); } else { assert(call->gtCallArgs->GetNode()->TypeIs(TYP_I_IMPL, TYP_BYREF)); } } break; case GT_CMPXCHG: expectedFlags |= (GTF_GLOB_REF | GTF_ASG); break; default: break; } tree->VisitOperands([&](GenTree* operand) -> GenTree::VisitResult { // ASGs are nodes that produce no value, but have a type (essentially, the type of the location). // Validate that nodes that parent ASGs do not consume values. This check also ensures that code // which updates location types ("gsParamsToShadows" replaces small LCL_VARs with TYP_INT ones) // does not have to worry about propagating the new type "up the tree". 
// // Uncoditionally allowing COMMA here weakens the assert, but is necessary because the compiler // ("gtExtractSideEffList") can create "typed" "comma lists" with ASGs as second operands. // if (operand->OperIs(GT_ASG)) { assert(tree->IsCall() || tree->OperIs(GT_COMMA)); } fgDebugCheckFlags(operand); expectedFlags |= (operand->gtFlags & GTF_ALL_EFFECT); return GenTree::VisitResult::Continue; }); // ADDR nodes break the "parent flags >= operands flags" invariant for GTF_GLOB_REF. if (tree->OperIs(GT_ADDR) && op1->OperIs(GT_LCL_VAR, GT_LCL_FLD, GT_CLS_VAR)) { expectedFlags &= ~GTF_GLOB_REF; } fgDebugCheckFlagsHelper(tree, actualFlags, expectedFlags); } //------------------------------------------------------------------------------ // fgDebugCheckDispFlags: Wrapper function that displays GTF_IND_ flags // and then calls gtDispFlags to display the rest. // // Arguments: // tree - Tree whose flags are being checked // dispFlags - the first argument for gtDispFlags (flags to display), // including GTF_IND_INVARIANT, GTF_IND_NONFAULTING, GTF_IND_NONNULL // debugFlags - the second argument to gtDispFlags // void Compiler::fgDebugCheckDispFlags(GenTree* tree, GenTreeFlags dispFlags, GenTreeDebugFlags debugFlags) { if (tree->OperGet() == GT_IND) { printf("%c", (dispFlags & GTF_IND_INVARIANT) ? '#' : '-'); printf("%c", (dispFlags & GTF_IND_NONFAULTING) ? 'n' : '-'); printf("%c", (dispFlags & GTF_IND_NONNULL) ? '@' : '-'); } GenTree::gtDispFlags(dispFlags, debugFlags); } //------------------------------------------------------------------------------ // fgDebugCheckFlagsHelper : Check if all bits that are set in chkFlags are also set in treeFlags. // // Arguments: // tree - Tree whose flags are being checked // actualFlags - Actual flags on the tree // expectedFlags - Expected flags // void Compiler::fgDebugCheckFlagsHelper(GenTree* tree, GenTreeFlags actualFlags, GenTreeFlags expectedFlags) { if (expectedFlags & ~actualFlags) { // Print the tree so we can see it in the log. printf("Missing flags on tree [%06d]: ", dspTreeID(tree)); Compiler::fgDebugCheckDispFlags(tree, expectedFlags & ~actualFlags, GTF_DEBUG_NONE); printf("\n"); gtDispTree(tree); noway_assert(!"Missing flags on tree"); // Print the tree again so we can see it right after we hook up the debugger. printf("Missing flags on tree [%06d]: ", dspTreeID(tree)); Compiler::fgDebugCheckDispFlags(tree, expectedFlags & ~actualFlags, GTF_DEBUG_NONE); printf("\n"); gtDispTree(tree); } else if (actualFlags & ~expectedFlags) { // We can't/don't consider these flags (GTF_GLOB_REF or GTF_ORDER_SIDEEFF) as being "extra" flags // GenTreeFlags flagsToCheck = ~GTF_GLOB_REF & ~GTF_ORDER_SIDEEFF; if ((actualFlags & ~expectedFlags & flagsToCheck) != 0) { // Print the tree so we can see it in the log. printf("Extra flags on tree [%06d]: ", dspTreeID(tree)); Compiler::fgDebugCheckDispFlags(tree, actualFlags & ~expectedFlags, GTF_DEBUG_NONE); printf("\n"); gtDispTree(tree); noway_assert(!"Extra flags on tree"); // Print the tree again so we can see it right after we hook up the debugger. printf("Extra flags on tree [%06d]: ", dspTreeID(tree)); Compiler::fgDebugCheckDispFlags(tree, actualFlags & ~expectedFlags, GTF_DEBUG_NONE); printf("\n"); gtDispTree(tree); } } } // DEBUG routine to check correctness of the internal gtNext, gtPrev threading of a statement. // This threading is only valid when fgStmtListThreaded is true. // This calls an alternate method for FGOrderLinear. 
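// For example (illustrative only), for a statement rooted at ADD(a, MUL(b, c)) with
// GTF_REVERSE_OPS not set on either node, the execution-order threading checked below is
//
//     LCL_VAR a <-> LCL_VAR b <-> LCL_VAR c <-> MUL <-> ADD
//
// so ADD->gtPrev is the MUL (its second operand), which is exactly what the
// "expectedPrevTree" computation in fgDebugCheckNodeLinks verifies.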
void Compiler::fgDebugCheckNodeLinks(BasicBlock* block, Statement* stmt) { // LIR blocks are checked using BasicBlock::CheckLIR(). if (block->IsLIR()) { LIR::AsRange(block).CheckLIR(this); // TODO: return? } assert(fgStmtListThreaded); noway_assert(stmt->GetTreeList()); // The first node's gtPrev must be nullptr (the gtPrev list is not circular). // The last node's gtNext must be nullptr (the gtNext list is not circular). This is tested if the loop below // terminates. assert(stmt->GetTreeList()->gtPrev == nullptr); for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext) { if (tree->gtPrev) { noway_assert(tree->gtPrev->gtNext == tree); } else { noway_assert(tree == stmt->GetTreeList()); } if (tree->gtNext) { noway_assert(tree->gtNext->gtPrev == tree); } else { noway_assert(tree == stmt->GetRootNode()); } /* Cross-check gtPrev,gtNext with GetOp() for simple trees */ GenTree* expectedPrevTree = nullptr; if (tree->OperIsLeaf()) { if (tree->gtOper == GT_CATCH_ARG) { // The GT_CATCH_ARG should always have GTF_ORDER_SIDEEFF set noway_assert(tree->gtFlags & GTF_ORDER_SIDEEFF); // The GT_CATCH_ARG has to be the first thing evaluated noway_assert(stmt == block->FirstNonPhiDef()); noway_assert(stmt->GetTreeList()->gtOper == GT_CATCH_ARG); // The root of the tree should have GTF_ORDER_SIDEEFF set noway_assert(stmt->GetRootNode()->gtFlags & GTF_ORDER_SIDEEFF); } } if (tree->OperIsUnary() && tree->AsOp()->gtOp1) { expectedPrevTree = tree->AsOp()->gtOp1; } else if (tree->OperIsBinary() && tree->AsOp()->gtOp1) { switch (tree->gtOper) { case GT_QMARK: // "then" operand of the GT_COLON (generated second). expectedPrevTree = tree->AsOp()->gtOp2->AsColon()->ThenNode(); break; case GT_COLON: expectedPrevTree = tree->AsColon()->ElseNode(); // "else" branch result (generated first). break; default: if (tree->AsOp()->gtOp2) { if (tree->gtFlags & GTF_REVERSE_OPS) { expectedPrevTree = tree->AsOp()->gtOp1; } else { expectedPrevTree = tree->AsOp()->gtOp2; } } else { expectedPrevTree = tree->AsOp()->gtOp1; } break; } } noway_assert(expectedPrevTree == nullptr || // No expectations about the prev node tree->gtPrev == expectedPrevTree); // The "normal" case } } /***************************************************************************** * * A DEBUG routine to check the correctness of the links between statements * and ordinary nodes within a statement. * ****************************************************************************/ void Compiler::fgDebugCheckLinks(bool morphTrees) { // This used to be only on for stress, and there was a comment stating that // it was "quite an expensive operation" but I did not find that to be true. // Set DO_SANITY_DEBUG_CHECKS to false to revert to that behavior. const bool DO_SANITY_DEBUG_CHECKS = true; if (!DO_SANITY_DEBUG_CHECKS && !compStressCompile(STRESS_CHK_FLOW_UPDATE, 30)) { return; } fgDebugCheckBlockLinks(); // For each block check the links between the trees. 
for (BasicBlock* const block : Blocks()) { if (block->IsLIR()) { LIR::AsRange(block).CheckLIR(this); } else { fgDebugCheckStmtsList(block, morphTrees); } } fgDebugCheckNodesUniqueness(); } //------------------------------------------------------------------------------ // fgDebugCheckStmtsList : Perfoms the set of checks: // - all statements in the block are linked correctly // - check statements flags // - check nodes gtNext and gtPrev values, if the node list is threaded // // Arguments: // block - the block to check statements in // morphTrees - try to morph trees in the checker // // Note: // Checking that all bits that are set in treeFlags are also set in chkFlags is currently disabled. void Compiler::fgDebugCheckStmtsList(BasicBlock* block, bool morphTrees) { for (Statement* const stmt : block->Statements()) { // Verify that bbStmtList is threaded correctly. // Note that for the statements list, the GetPrevStmt() list is circular. // The GetNextStmt() list is not: GetNextStmt() of the last statement in a block is nullptr. noway_assert(stmt->GetPrevStmt() != nullptr); if (stmt == block->bbStmtList) { noway_assert(stmt->GetPrevStmt()->GetNextStmt() == nullptr); } else { noway_assert(stmt->GetPrevStmt()->GetNextStmt() == stmt); } if (stmt->GetNextStmt() != nullptr) { noway_assert(stmt->GetNextStmt()->GetPrevStmt() == stmt); } else { noway_assert(block->lastStmt() == stmt); } /* For each statement check that the exception flags are properly set */ noway_assert(stmt->GetRootNode()); if (verbose && 0) { gtDispTree(stmt->GetRootNode()); } fgDebugCheckFlags(stmt->GetRootNode()); // Not only will this stress fgMorphBlockStmt(), but we also get all the checks // done by fgMorphTree() if (morphTrees) { // If 'stmt' is removed from the block, start a new check for the current block, // break the current check. if (fgMorphBlockStmt(block, stmt DEBUGARG("test morphing"))) { fgDebugCheckStmtsList(block, morphTrees); break; } } // For each statement check that the nodes are threaded correctly - m_treeList. if (fgStmtListThreaded) { fgDebugCheckNodeLinks(block, stmt); } } } // ensure that bbNext and bbPrev are consistent void Compiler::fgDebugCheckBlockLinks() { assert(fgFirstBB->bbPrev == nullptr); for (BasicBlock* const block : Blocks()) { if (block->bbNext) { assert(block->bbNext->bbPrev == block); } else { assert(block == fgLastBB); } if (block->bbPrev) { assert(block->bbPrev->bbNext == block); } else { assert(block == fgFirstBB); } // If this is a switch, check that the tables are consistent. // Note that we don't call GetSwitchDescMap(), because it has the side-effect // of allocating it if it is not present. if (block->bbJumpKind == BBJ_SWITCH && m_switchDescMap != nullptr) { SwitchUniqueSuccSet uniqueSuccSet; if (m_switchDescMap->Lookup(block, &uniqueSuccSet)) { // Create a set with all the successors. Don't use BlockSet, so we don't need to worry // about the BlockSet epoch. BitVecTraits bitVecTraits(fgBBNumMax + 1, this); BitVec succBlocks(BitVecOps::MakeEmpty(&bitVecTraits)); for (BasicBlock* const bTarget : block->SwitchTargets()) { BitVecOps::AddElemD(&bitVecTraits, succBlocks, bTarget->bbNum); } // Now we should have a set of unique successors that matches what's in the switchMap. // First, check the number of entries, then make sure all the blocks in uniqueSuccSet // are in the BlockSet. 
unsigned count = BitVecOps::Count(&bitVecTraits, succBlocks); assert(uniqueSuccSet.numDistinctSuccs == count); for (unsigned i = 0; i < uniqueSuccSet.numDistinctSuccs; i++) { assert(BitVecOps::IsMember(&bitVecTraits, succBlocks, uniqueSuccSet.nonDuplicates[i]->bbNum)); } } } } } // UniquenessCheckWalker keeps data that is neccesary to check // that each tree has it is own unique id and they do not repeat. class UniquenessCheckWalker { public: UniquenessCheckWalker(Compiler* comp) : comp(comp), nodesVecTraits(comp->compGenTreeID, comp), uniqueNodes(BitVecOps::MakeEmpty(&nodesVecTraits)) { } //------------------------------------------------------------------------ // fgMarkTreeId: Visit all subtrees in the tree and check gtTreeIDs. // // Arguments: // pTree - Pointer to the tree to walk // fgWalkPre - the UniquenessCheckWalker instance // static Compiler::fgWalkResult MarkTreeId(GenTree** pTree, Compiler::fgWalkData* fgWalkPre) { UniquenessCheckWalker* walker = static_cast<UniquenessCheckWalker*>(fgWalkPre->pCallbackData); unsigned gtTreeID = (*pTree)->gtTreeID; walker->CheckTreeId(gtTreeID); return Compiler::WALK_CONTINUE; } //------------------------------------------------------------------------ // CheckTreeId: Check that this tree was not visited before and memorize it as visited. // // Arguments: // gtTreeID - identificator of GenTree. // // Note: // This method causes an assert failure when we find a duplicated node in our tree // void CheckTreeId(unsigned gtTreeID) { if (BitVecOps::IsMember(&nodesVecTraits, uniqueNodes, gtTreeID)) { if (comp->verbose) { printf("Duplicate gtTreeID was found: %d\n", gtTreeID); } assert(!"Duplicate gtTreeID was found"); } else { BitVecOps::AddElemD(&nodesVecTraits, uniqueNodes, gtTreeID); } } private: Compiler* comp; BitVecTraits nodesVecTraits; BitVec uniqueNodes; }; //------------------------------------------------------------------------------ // fgDebugCheckNodesUniqueness: Check that each tree in the method has its own unique gtTreeId. // void Compiler::fgDebugCheckNodesUniqueness() { UniquenessCheckWalker walker(this); for (BasicBlock* const block : Blocks()) { if (block->IsLIR()) { for (GenTree* i : LIR::AsRange(block)) { walker.CheckTreeId(i->gtTreeID); } } else { for (Statement* const stmt : block->Statements()) { GenTree* root = stmt->GetRootNode(); fgWalkTreePre(&root, UniquenessCheckWalker::MarkTreeId, &walker); } } } } //------------------------------------------------------------------------------ // fgDebugCheckLoopTable: checks that the loop table is valid. // - If the method has natural loops, the loop table is not null // - Loop `top` must come before `bottom`. // - Loop `entry` must be between `top` and `bottom`. // - Children loops of a loop are disjoint. // - All basic blocks with loop numbers set have a corresponding loop in the table // - All basic blocks without a loop number are not in a loop // - All parents of the loop with the block contain that block // - If the loop has a pre-header, it is valid // - The loop flags are valid // void Compiler::fgDebugCheckLoopTable() { #ifdef DEBUG if (verbose) { printf("*************** In fgDebugCheckLoopTable\n"); } #endif // DEBUG if (optLoopCount > 0) { assert(optLoopTable != nullptr); } // Build a mapping from existing block list number (bbNum) to the block number it would be after the // blocks are renumbered. This allows making asserts about the relative ordering of blocks using block number // without actually renumbering the blocks, which would affect non-DEBUG code paths. 
Note that there may be // `blockNumMap[bbNum] == 0` if the `bbNum` block was deleted and blocks haven't been renumbered since // the deletion. unsigned bbNumMax = impInlineRoot()->fgBBNumMax; // blockNumMap[old block number] => new block number size_t blockNumBytes = (bbNumMax + 1) * sizeof(unsigned); unsigned* blockNumMap = (unsigned*)_alloca(blockNumBytes); memset(blockNumMap, 0, blockNumBytes); unsigned newBBnum = 1; for (BasicBlock* const block : Blocks()) { if ((block->bbFlags & BBF_REMOVED) == 0) { assert(1 <= block->bbNum && block->bbNum <= bbNumMax); assert(blockNumMap[block->bbNum] == 0); // If this fails, we have two blocks with the same block number. blockNumMap[block->bbNum] = newBBnum++; } } struct MappedChecks { static bool lpWellFormed(const unsigned* blockNumMap, const LoopDsc* loop) { return (blockNumMap[loop->lpTop->bbNum] <= blockNumMap[loop->lpEntry->bbNum]) && (blockNumMap[loop->lpEntry->bbNum] <= blockNumMap[loop->lpBottom->bbNum]) && ((blockNumMap[loop->lpHead->bbNum] < blockNumMap[loop->lpTop->bbNum]) || (blockNumMap[loop->lpHead->bbNum] > blockNumMap[loop->lpBottom->bbNum])); } static bool lpContains(const unsigned* blockNumMap, const LoopDsc* loop, const BasicBlock* blk) { return (blockNumMap[loop->lpTop->bbNum] <= blockNumMap[blk->bbNum]) && (blockNumMap[blk->bbNum] <= blockNumMap[loop->lpBottom->bbNum]); } static bool lpContains(const unsigned* blockNumMap, const LoopDsc* loop, const BasicBlock* top, const BasicBlock* bottom) { return (blockNumMap[loop->lpTop->bbNum] <= blockNumMap[top->bbNum]) && (blockNumMap[bottom->bbNum] < blockNumMap[loop->lpBottom->bbNum]); } static bool lpContains(const unsigned* blockNumMap, const LoopDsc* loop, const LoopDsc& lp2) { return lpContains(blockNumMap, loop, lp2.lpTop, lp2.lpBottom); } static bool lpContainedBy(const unsigned* blockNumMap, const LoopDsc* loop, const BasicBlock* top, const BasicBlock* bottom) { return (blockNumMap[top->bbNum] <= blockNumMap[loop->lpTop->bbNum]) && (blockNumMap[loop->lpBottom->bbNum] < blockNumMap[bottom->bbNum]); } static bool lpContainedBy(const unsigned* blockNumMap, const LoopDsc* loop, const LoopDsc& lp2) { return lpContainedBy(blockNumMap, loop, lp2.lpTop, lp2.lpBottom); } static bool lpDisjoint(const unsigned* blockNumMap, const LoopDsc* loop, const BasicBlock* top, const BasicBlock* bottom) { return (blockNumMap[bottom->bbNum] < blockNumMap[loop->lpTop->bbNum]) || (blockNumMap[loop->lpBottom->bbNum] < blockNumMap[top->bbNum]); } static bool lpDisjoint(const unsigned* blockNumMap, const LoopDsc* loop, const LoopDsc& lp2) { return lpDisjoint(blockNumMap, loop, lp2.lpTop, lp2.lpBottom); } }; // Check the loop table itself. int preHeaderCount = 0; for (unsigned i = 0; i < optLoopCount; i++) { const LoopDsc& loop = optLoopTable[i]; // Ignore removed loops if (loop.lpFlags & LPFLG_REMOVED) { continue; } assert(loop.lpHead != nullptr); assert(loop.lpTop != nullptr); assert(loop.lpEntry != nullptr); assert(loop.lpBottom != nullptr); assert(MappedChecks::lpWellFormed(blockNumMap, &loop)); if (loop.lpExitCnt == 1) { assert(loop.lpExit != nullptr); assert(MappedChecks::lpContains(blockNumMap, &loop, loop.lpExit)); } else { assert(loop.lpExit == nullptr); } if (loop.lpParent == BasicBlock::NOT_IN_LOOP) { // This is a top-level loop. // Verify all top-level loops are disjoint. We don't have a list of just these (such as a // top-level pseudo-loop entry with a list of all top-level lists), so we have to iterate // over the entire loop table. 
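            // For example (illustrative only; loop numbers and block ranges are hypothetical):
            // two top-level loops L00 spanning BB02..BB05 and L01 spanning BB08..BB12 satisfy
            // lpDisjoint; if their block ranges overlapped, the assert below would fire.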
for (unsigned j = 0; j < optLoopCount; j++) { if (i == j) { // Don't compare against ourselves. continue; } const LoopDsc& otherLoop = optLoopTable[j]; if (otherLoop.lpFlags & LPFLG_REMOVED) { continue; } if (otherLoop.lpParent != BasicBlock::NOT_IN_LOOP) { // Only consider top-level loops continue; } assert(MappedChecks::lpDisjoint(blockNumMap, &loop, otherLoop)); } } else { // This is not a top-level loop assert(loop.lpParent != BasicBlock::NOT_IN_LOOP); assert(loop.lpParent < optLoopCount); assert(loop.lpParent < i); // outer loops come before inner loops in the table const LoopDsc& parentLoop = optLoopTable[loop.lpParent]; assert((parentLoop.lpFlags & LPFLG_REMOVED) == 0); // don't allow removed parent loop? assert(MappedChecks::lpContainedBy(blockNumMap, &loop, optLoopTable[loop.lpParent])); } if (loop.lpChild != BasicBlock::NOT_IN_LOOP) { // Verify all child loops are contained in the parent loop. for (unsigned child = loop.lpChild; // child != BasicBlock::NOT_IN_LOOP; // child = optLoopTable[child].lpSibling) { assert(child < optLoopCount); assert(i < child); // outer loops come before inner loops in the table const LoopDsc& childLoop = optLoopTable[child]; if (childLoop.lpFlags & LPFLG_REMOVED) // removed child loop might still be in table { continue; } assert(MappedChecks::lpContains(blockNumMap, &loop, childLoop)); assert(childLoop.lpParent == i); } // Verify all child loops are disjoint. for (unsigned child = loop.lpChild; // child != BasicBlock::NOT_IN_LOOP; // child = optLoopTable[child].lpSibling) { const LoopDsc& childLoop = optLoopTable[child]; if (childLoop.lpFlags & LPFLG_REMOVED) { continue; } for (unsigned child2 = optLoopTable[child].lpSibling; // child2 != BasicBlock::NOT_IN_LOOP; // child2 = optLoopTable[child2].lpSibling) { const LoopDsc& child2Loop = optLoopTable[child2]; if (child2Loop.lpFlags & LPFLG_REMOVED) { continue; } assert(MappedChecks::lpDisjoint(blockNumMap, &childLoop, child2Loop)); } } } // If the loop has a pre-header, ensure the pre-header form is correct. if ((loop.lpFlags & LPFLG_HAS_PREHEAD) != 0) { ++preHeaderCount; BasicBlock* h = loop.lpHead; assert(h->bbFlags & BBF_LOOP_PREHEADER); // The pre-header can only be BBJ_ALWAYS or BBJ_NONE and must enter the loop. BasicBlock* e = loop.lpEntry; if (h->bbJumpKind == BBJ_ALWAYS) { assert(h->bbJumpDest == e); } else { assert(h->bbJumpKind == BBJ_NONE); assert(h->bbNext == e); assert(loop.lpTop == e); assert(loop.lpIsTopEntry()); } // The entry block has a single non-loop predecessor, and it is the pre-header. for (BasicBlock* const predBlock : e->PredBlocks()) { if (predBlock != h) { assert(MappedChecks::lpContains(blockNumMap, &loop, predBlock)); } } loop.lpValidatePreHeader(); } // Check the flags. // Note that the various limit flags are only used when LPFLG_ITER is set, but they are set first, // separately, and only if everything works out is LPFLG_ITER set. If LPFLG_ITER is NOT set, the // individual flags are not un-set (arguably, they should be). // Only one of the `limit` flags can be set. (Note that LPFLG_SIMD_LIMIT is a "sub-flag" that can be // set when LPFLG_CONST_LIMIT is set.) assert(genCountBits((unsigned)(loop.lpFlags & (LPFLG_VAR_LIMIT | LPFLG_CONST_LIMIT | LPFLG_ARRLEN_LIMIT))) <= 1); // LPFLG_SIMD_LIMIT can only be set if LPFLG_CONST_LIMIT is set. 
if (loop.lpFlags & LPFLG_SIMD_LIMIT) { assert(loop.lpFlags & LPFLG_CONST_LIMIT); } if (loop.lpFlags & LPFLG_CONST_INIT) { assert(loop.lpInitBlock != nullptr); } if (loop.lpFlags & LPFLG_ITER) { loop.VERIFY_lpIterTree(); loop.VERIFY_lpTestTree(); } } // Check basic blocks for loop annotations. for (BasicBlock* const block : Blocks()) { if (optLoopCount == 0) { assert(block->bbNatLoopNum == BasicBlock::NOT_IN_LOOP); continue; } // Walk the loop table and find the first loop that contains our block. // It should be the innermost one. int loopNum = BasicBlock::NOT_IN_LOOP; for (int i = optLoopCount - 1; i >= 0; i--) { // Ignore removed loops if (optLoopTable[i].lpFlags & LPFLG_REMOVED) { continue; } // Does this loop contain our block? if (MappedChecks::lpContains(blockNumMap, &optLoopTable[i], block)) { loopNum = i; break; } } // If there is at least one loop that contains this block... if (loopNum != BasicBlock::NOT_IN_LOOP) { // ...it must be the one pointed to by bbNatLoopNum. assert(block->bbNatLoopNum == loopNum); // TODO: We might want the following assert, but there are cases where we don't move all // return blocks out of the loop. // Return blocks are not allowed inside a loop; they should have been moved elsewhere. // assert(block->bbJumpKind != BBJ_RETURN); } else { // Otherwise, this block should not point to a loop. assert(block->bbNatLoopNum == BasicBlock::NOT_IN_LOOP); } // All loops that contain the innermost loop with this block must also contain this block. while (loopNum != BasicBlock::NOT_IN_LOOP) { assert(MappedChecks::lpContains(blockNumMap, &optLoopTable[loopNum], block)); loopNum = optLoopTable[loopNum].lpParent; } if (block->bbFlags & BBF_LOOP_PREHEADER) { // Note that the bbNatLoopNum will not point to the loop where this is a pre-header, since bbNatLoopNum // is only set on the blocks from `top` to `bottom`, and `head` is outside that. --preHeaderCount; } } // Verify that the number of loops marked as having pre-headers is the same as the number of blocks // with the pre-header flag set. assert(preHeaderCount == 0); } /*****************************************************************************/ #endif // DEBUG
1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/coreclr/jit/loopcloning.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX LoopCloning XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef DEBUG //-------------------------------------------------------------------------------------------------- // ArrIndex::Print - debug print an ArrIndex struct in form: `V01[V02][V03]`. // // Arguments: // dim (Optional) Print up to but not including this dimension. Default: print all dimensions. // void ArrIndex::Print(unsigned dim /* = -1 */) { printf("V%02d", arrLcl); for (unsigned i = 0; i < ((dim == (unsigned)-1) ? rank : dim); ++i) { printf("[V%02d]", indLcls.Get(i)); } } //-------------------------------------------------------------------------------------------------- // ArrIndex::PrintBoundsCheckNodes - debug print an ArrIndex struct bounds check node tree ids in // form: `[000125][000113]`. // // Arguments: // dim (Optional) Print up to but not including this dimension. Default: print all dimensions. // void ArrIndex::PrintBoundsCheckNodes(unsigned dim /* = -1 */) { for (unsigned i = 0; i < ((dim == (unsigned)-1) ? rank : dim); ++i) { Compiler::printTreeID(bndsChks.Get(i)); } } #endif // DEBUG //-------------------------------------------------------------------------------------------------- // ToGenTree - Convert an arrLen operation into a gentree node. // // Arguments: // comp Compiler instance to allocate trees // bb Basic block of the new tree // // Return Values: // Returns the gen tree representation for arrLen or MD Array node as defined by // the "type" member // // Notes: // This tree produces GT_INDEX node, the caller is supposed to morph it appropriately // so it can be codegen'ed. // GenTree* LC_Array::ToGenTree(Compiler* comp, BasicBlock* bb) { // If jagged array if (type == Jagged) { // Create a a[i][j][k].length type node. GenTree* arr = comp->gtNewLclvNode(arrIndex->arrLcl, comp->lvaTable[arrIndex->arrLcl].lvType); int rank = GetDimRank(); for (int i = 0; i < rank; ++i) { arr = comp->gtNewIndexRef(TYP_REF, arr, comp->gtNewLclvNode(arrIndex->indLcls[i], comp->lvaTable[arrIndex->indLcls[i]].lvType)); // Clear the range check flag and mark the index as non-faulting: we guarantee that all necessary range // checking has already been done by the time this array index expression is invoked. arr->gtFlags &= ~(GTF_INX_RNGCHK | GTF_EXCEPT); arr->gtFlags |= GTF_INX_NOFAULT; } // If asked for arrlen invoke arr length operator. if (oper == ArrLen) { GenTree* arrLen = comp->gtNewArrLen(TYP_INT, arr, OFFSETOF__CORINFO_Array__length, bb); // We already guaranteed (by a sequence of preceding checks) that the array length operator will not // throw an exception because we null checked the base array. // So, we should be able to do the following: // arrLen->gtFlags &= ~GTF_EXCEPT; // arrLen->gtFlags |= GTF_IND_NONFAULTING; // However, we then end up with a mix of non-faulting array length operators as well as normal faulting // array length operators in the slow-path of the cloned loops. CSE doesn't keep these separate, so bails // out on creating CSEs on this very useful type of CSE, leading to CQ losses in the cloned loop fast path. // TODO-CQ: fix this. 
return arrLen; } else { assert(oper == None); return arr; } } else { // TODO-CQ: Optimize for MD Array. assert(!"Optimize for MD Array"); } return nullptr; } //-------------------------------------------------------------------------------------------------- // ToGenTree - Convert an "identifier" into a gentree node. // // Arguments: // comp Compiler instance to allocate trees // bb Basic block of the new tree // // Return Values: // Returns the gen tree representation for either a constant or a variable or an arrLen operation // defined by the "type" member // GenTree* LC_Ident::ToGenTree(Compiler* comp, BasicBlock* bb) { // Convert to GenTree nodes. switch (type) { case Const: assert(constant <= INT32_MAX); return comp->gtNewIconNode(constant); case Var: return comp->gtNewLclvNode(constant, comp->lvaTable[constant].lvType); case ArrLen: return arrLen.ToGenTree(comp, bb); case Null: return comp->gtNewIconNode(0, TYP_REF); default: assert(!"Could not convert LC_Ident to GenTree"); unreached(); break; } } //-------------------------------------------------------------------------------------------------- // ToGenTree - Convert an "expression" into a gentree node. // // Arguments: // comp Compiler instance to allocate trees // bb Basic block of the new tree // // Return Values: // Returns the gen tree representation for either a constant or a variable or an arrLen operation // defined by the "type" member // GenTree* LC_Expr::ToGenTree(Compiler* comp, BasicBlock* bb) { // Convert to GenTree nodes. switch (type) { case Ident: return ident.ToGenTree(comp, bb); default: assert(!"Could not convert LC_Expr to GenTree"); unreached(); break; } } //-------------------------------------------------------------------------------------------------- // ToGenTree - Convert a "condition" into a gentree node. // // Arguments: // comp Compiler instance to allocate trees // bb Basic block of the new tree // invert `true` if the condition should be inverted // // Return Values: // Returns the GenTree representation for the conditional operator on lhs and rhs trees // GenTree* LC_Condition::ToGenTree(Compiler* comp, BasicBlock* bb, bool invert) { GenTree* op1Tree = op1.ToGenTree(comp, bb); GenTree* op2Tree = op2.ToGenTree(comp, bb); assert(genTypeSize(genActualType(op1Tree->TypeGet())) == genTypeSize(genActualType(op2Tree->TypeGet()))); return comp->gtNewOperNode(invert ? GenTree::ReverseRelop(oper) : oper, TYP_INT, op1Tree, op2Tree); } //-------------------------------------------------------------------------------------------------- // Evaluates - Evaluate a given loop cloning condition if it can be statically evaluated. // // Arguments: // pResult OUT parameter. The evaluation result // // Return Values: // Returns true if the condition can be statically evaluated. If the condition's result // is statically unknown then return false. In other words, `*pResult` is valid only if the // function returns true. // bool LC_Condition::Evaluates(bool* pResult) { switch (oper) { case GT_EQ: case GT_GE: case GT_LE: // If op1 == op2 then equality should result in true. if (op1 == op2) { *pResult = true; return true; } break; case GT_GT: case GT_LT: case GT_NE: // If op1 == op2 then inequality should result in false. 
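// For example (illustrative), a generated condition such as "V02 > V02" can never hold, so the
// caller can abort cloning for this loop without emitting a runtime check.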
if (op1 == op2) { *pResult = false; return true; } break; default: // for all other 'oper' kinds, we will return false break; } return false; } //-------------------------------------------------------------------------------------------------- // Combines - Check whether two conditions would combine to yield a single new condition. // // Arguments: // cond The condition that is checked if it would combine with "*this" condition. // newCond The resulting combined condition. // // Return Values: // Returns true if "cond" combines with the "this" condition. // "newCond" contains the combines condition. // // Operation: // Check if both conditions are equal. If so, return just 1 of them. // Reverse their operators and check if their reversed operands match. If so, return either of them. // // Notes: // This is not a full-fledged expression optimizer, it is supposed // to remove redundant conditions that are generated for optimization // opportunities. Anything further should be implemented as needed. // For example, for (i = beg; i < end; i += inc) a[i]. Then, the conditions // would be: "beg >= 0, end <= a.len, inc > 0" bool LC_Condition::Combines(const LC_Condition& cond, LC_Condition* newCond) { if (oper == cond.oper && op1 == cond.op1 && op2 == cond.op2) { *newCond = *this; return true; } else if ((oper == GT_LT || oper == GT_LE || oper == GT_GT || oper == GT_GE) && GenTree::ReverseRelop(oper) == cond.oper && op1 == cond.op2 && op2 == cond.op1) { *newCond = *this; return true; } return false; } //-------------------------------------------------------------------------------------------------- // GetLoopOptInfo - Retrieve the loop opt info candidate array. // // Arguments: // loopNum the loop index. // // Return Values: // Return the optInfo array member. The method doesn't allocate memory. // JitExpandArrayStack<LcOptInfo*>* LoopCloneContext::GetLoopOptInfo(unsigned loopNum) { return optInfo[loopNum]; } //-------------------------------------------------------------------------------------------------- // CancelLoopOptInfo - Cancel loop cloning optimization for this loop. // // Arguments: // loopNum the loop index. // // Return Values: // None. // void LoopCloneContext::CancelLoopOptInfo(unsigned loopNum) { JITDUMP("Cancelling loop cloning for loop " FMT_LP "\n", loopNum); optInfo[loopNum] = nullptr; if (conditions[loopNum] != nullptr) { conditions[loopNum]->Reset(); conditions[loopNum] = nullptr; } } //-------------------------------------------------------------------------------------------------- // EnsureLoopOptInfo - Retrieve the loop opt info candidate array, if it is not present, allocate // memory. // // Arguments: // loopNum the loop index. // // Return Values: // The array of optimization candidates for the loop. // JitExpandArrayStack<LcOptInfo*>* LoopCloneContext::EnsureLoopOptInfo(unsigned loopNum) { if (optInfo[loopNum] == nullptr) { optInfo[loopNum] = new (alloc) JitExpandArrayStack<LcOptInfo*>(alloc, 4); } return optInfo[loopNum]; } //-------------------------------------------------------------------------------------------------- // EnsureLoopOptInfo - Retrieve the loop cloning conditions candidate array, // if it is not present, allocate memory. // // Arguments: // loopNum the loop index. // // Return Values: // The array of cloning conditions for the loop. 
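// Notes:
//    The conditions themselves are pushed into this array later by callers such as
//    optDeriveLoopCloningConditions below.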
// JitExpandArrayStack<LC_Condition>* LoopCloneContext::EnsureConditions(unsigned loopNum) { if (conditions[loopNum] == nullptr) { conditions[loopNum] = new (alloc) JitExpandArrayStack<LC_Condition>(alloc, 4); } return conditions[loopNum]; } //-------------------------------------------------------------------------------------------------- // GetConditions - Get the cloning conditions array for the loop, no allocation. // // Arguments: // loopNum the loop index. // // Return Values: // The array of cloning conditions for the loop. // JitExpandArrayStack<LC_Condition>* LoopCloneContext::GetConditions(unsigned loopNum) { return conditions[loopNum]; } //-------------------------------------------------------------------------------------------------- // EnsureDerefs - Ensure an array of dereferences is created if it doesn't exist. // // Arguments: // loopNum the loop index. // // Return Values: // The array of dereferences for the loop. // JitExpandArrayStack<LC_Array>* LoopCloneContext::EnsureDerefs(unsigned loopNum) { if (derefs[loopNum] == nullptr) { derefs[loopNum] = new (alloc) JitExpandArrayStack<LC_Array>(alloc, 4); } return derefs[loopNum]; } //-------------------------------------------------------------------------------------------------- // HasBlockConditions - Check if there are block level conditions for the loop. // // Arguments: // loopNum the loop index. // // Return Values: // Return true if there are any block level conditions. // bool LoopCloneContext::HasBlockConditions(unsigned loopNum) { JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* levelCond = blockConditions[loopNum]; if (levelCond == nullptr) { return false; } // Walk through each block to check if any of them has conditions. for (unsigned i = 0; i < levelCond->Size(); ++i) { if ((*levelCond)[i]->Size() > 0) { return true; } } return false; } //-------------------------------------------------------------------------------------------------- // GetBlockConditions - Return block level conditions for the loop. // // Arguments: // loopNum the loop index. // // Return Values: // Return block conditions. // JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* LoopCloneContext::GetBlockConditions(unsigned loopNum) { assert(HasBlockConditions(loopNum)); return blockConditions[loopNum]; } //-------------------------------------------------------------------------------------------------- // EnsureBlockConditions - Allocate block level conditions for the loop if not exists. // // Arguments: // loopNum the loop index. // condBlocks the number of block-level conditions for each loop, corresponding to the blocks // created. // // Return Values: // Return block conditions. 
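// Notes:
//    The outer array is indexed by condition block (level). The caller computes `condBlocks`;
//    see optComputeDerefConditions, which currently derives it as `maxRank * 2 + 1`.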
// JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* LoopCloneContext::EnsureBlockConditions(unsigned loopNum, unsigned condBlocks) { if (blockConditions[loopNum] == nullptr) { blockConditions[loopNum] = new (alloc) JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>(alloc, condBlocks); } JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* levelCond = blockConditions[loopNum]; for (unsigned i = 0; i < condBlocks; ++i) { levelCond->Set(i, new (alloc) JitExpandArrayStack<LC_Condition>(alloc)); } return levelCond; } #ifdef DEBUG void LoopCloneContext::PrintBlockConditions(unsigned loopNum) { printf("Block conditions:\n"); JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* blockConds = blockConditions[loopNum]; if (blockConds == nullptr || blockConds->Size() == 0) { printf("No block conditions\n"); return; } for (unsigned i = 0; i < blockConds->Size(); ++i) { PrintBlockLevelConditions(i, (*blockConds)[i]); } } void LoopCloneContext::PrintBlockLevelConditions(unsigned level, JitExpandArrayStack<LC_Condition>* levelCond) { printf("%d = ", level); for (unsigned j = 0; j < levelCond->Size(); ++j) { if (j != 0) { printf(" && "); } printf("("); (*levelCond)[j].Print(); printf(")"); } printf("\n"); } #endif //-------------------------------------------------------------------------------------------------- // EvaluateConditions - Evaluate the loop cloning conditions statically, if they can be evaluated. // // Arguments: // loopNum the loop index. // pAllTrue OUT parameter. `*pAllTrue` is set to `true` if all the cloning conditions statically // evaluate to true. // pAnyFalse OUT parameter. `*pAnyFalse` is set to `true` if some cloning condition statically // evaluate to false. // verbose verbose logging required. // // Return Values: // None. // // Operation: // For example, a condition like "V02 >= V02" statically evaluates to true. Caller should detect such // conditions and remove them from the "conditions" array. // // Similarly, conditions like "V02 > V02" will evaluate to "false". In this case caller has to abort // loop cloning optimization for the loop. Note that the assumption for conditions is that they will // all be "AND"ed, so statically we know we will never take the fast path. // // Sometimes we simply can't say statically whether "V02 > V01.length" is true or false. // In that case, `*pAllTrue` will be false because this condition doesn't evaluate to "true" and // `*pAnyFalse` could be false if no other condition statically evaluates to "false". // // If `*pAnyFalse` is true, we set that and return, and `*pAllTrue` is not accurate, since the loop cloning // needs to be aborted. // void LoopCloneContext::EvaluateConditions(unsigned loopNum, bool* pAllTrue, bool* pAnyFalse DEBUGARG(bool verbose)) { bool allTrue = true; bool anyFalse = false; JitExpandArrayStack<LC_Condition>& conds = *conditions[loopNum]; JITDUMP("Evaluating %d loop cloning conditions for loop " FMT_LP "\n", conds.Size(), loopNum); assert(conds.Size() > 0); for (unsigned i = 0; i < conds.Size(); ++i) { #ifdef DEBUG if (verbose) { printf("Considering condition %d: (", i); conds[i].Print(); } #endif bool res = false; // Check if this condition evaluates to true or false. if (conds[i].Evaluates(&res)) { JITDUMP(") evaluates to %s\n", dspBool(res)); if (!res) { anyFalse = true; // Since this will force us to abort loop cloning, there is no need compute an accurate `allTrue`, // so we can break out of the loop now. 
break; } } else { JITDUMP("), could not be evaluated\n"); allTrue = false; } } JITDUMP("Evaluation result allTrue = %s, anyFalse = %s\n", dspBool(allTrue), dspBool(anyFalse)); *pAllTrue = allTrue; *pAnyFalse = anyFalse; } //-------------------------------------------------------------------------------------------------- // OptimizeConditions - Evaluate the loop cloning conditions statically, if they can be evaluated // then optimize the "conditions" array accordingly. // // Arguments: // conds The conditions array to optimize. // // Return Values: // None. // // Operation: // For example, a condition like "V02 >= V02" statically evaluates to true. Remove such conditions // from the "conditions" array. // // Similarly, conditions like "V02 > V02" will evaluate to "false". In this case abort loop cloning // optimization for the loop. // // Sometimes, two conditions will combine together to yield a single condition, then remove a // duplicate condition. void LoopCloneContext::OptimizeConditions(JitExpandArrayStack<LC_Condition>& conds) { for (unsigned i = 0; i < conds.Size(); ++i) { // Check if the conditions evaluate. bool result = false; if (conds[i].Evaluates(&result)) { // If statically known to be true, then remove this condition. if (result) { conds.Remove(i); --i; continue; } else { // Some condition is statically false, then simply indicate // not to clone this loop. CancelLoopOptInfo(i); break; } } // Check for all other conditions[j], if it would combine with // conditions[i]. for (unsigned j = i + 1; j < conds.Size(); ++j) { LC_Condition newCond; if (conds[i].Combines(conds[j], &newCond)) { conds.Remove(j); conds[i] = newCond; i = -1; break; } } } #ifdef DEBUG // Make sure we didn't miss some combining. for (unsigned i = 0; i < conds.Size(); ++i) { for (unsigned j = 0; j < conds.Size(); ++j) { LC_Condition newCond; if ((i != j) && conds[i].Combines(conds[j], &newCond)) { assert(!"Loop cloning conditions can still be optimized further."); } } } #endif } //-------------------------------------------------------------------------------------------------- // OptimizeBlockConditions - Optimize block level conditions. // // Arguments: // loopNum the loop index. // // Operation: // Calls OptimizeConditions helper on block level conditions. // // Return Values: // None. // void LoopCloneContext::OptimizeBlockConditions(unsigned loopNum DEBUGARG(bool verbose)) { if (!HasBlockConditions(loopNum)) { return; } JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* levelCond = blockConditions[loopNum]; for (unsigned i = 0; i < levelCond->Size(); ++i) { OptimizeConditions(*((*levelCond)[i])); } #ifdef DEBUG if (verbose) { printf("After optimizing block-level cloning conditions\n\t"); PrintConditions(loopNum); printf("\n"); } #endif } //-------------------------------------------------------------------------------------------------- // OptimizeConditions - Optimize cloning conditions. // // Arguments: // loopNum the loop index. // verbose verbose logging required. // // Operation: // Calls OptimizeConditions helper on cloning conditions. // // Return Values: // None. 
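// Notes:
//    This overload dumps the conditions before and after delegating to the
//    OptimizeConditions(conds) worker above.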
// void LoopCloneContext::OptimizeConditions(unsigned loopNum DEBUGARG(bool verbose)) { #ifdef DEBUG if (verbose) { printf("Before optimizing cloning conditions\n\t"); PrintConditions(loopNum); printf("\n"); } #endif JitExpandArrayStack<LC_Condition>& conds = *conditions[loopNum]; OptimizeConditions(conds); #ifdef DEBUG if (verbose) { printf("After optimizing cloning conditions\n\t"); PrintConditions(loopNum); printf("\n"); } #endif } #ifdef DEBUG //-------------------------------------------------------------------------------------------------- // PrintConditions - Print loop cloning conditions necessary to clone the loop. // // Arguments: // loopNum the loop index. // // Return Values: // None. // void LoopCloneContext::PrintConditions(unsigned loopNum) { if (conditions[loopNum] == nullptr) { printf("NO conditions"); return; } if (conditions[loopNum]->Size() == 0) { printf("Conditions were optimized away! Will always take cloned path."); return; } for (unsigned i = 0; i < conditions[loopNum]->Size(); ++i) { if (i != 0) { printf(" && "); } printf("("); (*conditions[loopNum])[i].Print(); printf(")"); } } #endif //-------------------------------------------------------------------------------------------------- // CondToStmtInBlock: Convert an array of conditions to IR. Evaluate them into a JTRUE stmt and add it to // a new block after `insertAfter`. // // Arguments: // comp Compiler instance // conds Array of conditions to evaluate into a JTRUE stmt // slowHead Branch here on condition failure // insertAfter Insert the conditions in a block after this block // // Notes: // If any condition fails, branch to the `slowHead` block. There are two options here: // 1. Generate all the conditions in a single block using bitwise `&` to merge them, e.g.: // jmpTrue(cond1 & cond2 ... == 0) => slowHead // In this form, we always execute all the conditions (there is no short-circuit evaluation). // Since we expect that in the usual case all the conditions will fail, and we'll execute the // loop fast path, the lack of short-circuit evaluation is not a problem. If the code is smaller // and faster, this would be preferable. // 2. Generate each condition in a separate block, e.g.: // jmpTrue(!cond1) => slowHead // jmpTrue(!cond2) => slowHead // ... // If this code is smaller/faster, this can be preferable. Also, the flow graph is more normal, // and amenable to downstream flow optimizations. // // Which option we choose is currently compile-time determined. // // We assume that `insertAfter` is a fall-through block, and we add it to the predecessors list // of the first newly added block. `insertAfter` is also assumed to be in the same loop (we can // clone its loop number). 
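// For illustration, with two conditions C1 and C2 and one condition generated per block (option 2),
// the resulting flow is roughly:
//      insertAfter (fall through)
//      JTRUE(!C1) -?> slowHead
//      JTRUE(!C2) -?> slowHead
//      ... fast path continues ...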
// // Return Value: // Last block added // BasicBlock* LoopCloneContext::CondToStmtInBlock(Compiler* comp, JitExpandArrayStack<LC_Condition>& conds, BasicBlock* slowHead, BasicBlock* insertAfter) { noway_assert(conds.Size() > 0); assert(slowHead != nullptr); assert(insertAfter->KindIs(BBJ_NONE, BBJ_COND)); // Choose how to generate the conditions const bool generateOneConditionPerBlock = true; if (generateOneConditionPerBlock) { BasicBlock* newBlk = nullptr; for (unsigned i = 0; i < conds.Size(); ++i) { newBlk = comp->fgNewBBafter(BBJ_COND, insertAfter, /*extendRegion*/ true); newBlk->inheritWeight(insertAfter); newBlk->bbNatLoopNum = insertAfter->bbNatLoopNum; newBlk->bbJumpDest = slowHead; JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", newBlk->bbNum, newBlk->bbJumpDest->bbNum); comp->fgAddRefPred(newBlk->bbJumpDest, newBlk); JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", insertAfter->bbNum, newBlk->bbNum); comp->fgAddRefPred(newBlk, insertAfter); JITDUMP("Adding conditions %u to " FMT_BB "\n", i, newBlk->bbNum); GenTree* cond = conds[i].ToGenTree(comp, newBlk, /* invert */ true); GenTree* jmpTrueTree = comp->gtNewOperNode(GT_JTRUE, TYP_VOID, cond); Statement* stmt = comp->fgNewStmtFromTree(jmpTrueTree); comp->fgInsertStmtAtEnd(newBlk, stmt); // Remorph. JITDUMP("Loop cloning condition tree before morphing:\n"); DBEXEC(comp->verbose, comp->gtDispTree(jmpTrueTree)); JITDUMP("\n"); comp->fgMorphBlockStmt(newBlk, stmt DEBUGARG("Loop cloning condition")); insertAfter = newBlk; } return newBlk; } else { BasicBlock* newBlk = comp->fgNewBBafter(BBJ_COND, insertAfter, /*extendRegion*/ true); newBlk->inheritWeight(insertAfter); newBlk->bbNatLoopNum = insertAfter->bbNatLoopNum; newBlk->bbJumpDest = slowHead; JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", newBlk->bbNum, newBlk->bbJumpDest->bbNum); comp->fgAddRefPred(newBlk->bbJumpDest, newBlk); JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", insertAfter->bbNum, newBlk->bbNum); comp->fgAddRefPred(newBlk, insertAfter); JITDUMP("Adding conditions to " FMT_BB "\n", newBlk->bbNum); // Get the first condition. GenTree* cond = conds[0].ToGenTree(comp, newBlk, /* invert */ false); for (unsigned i = 1; i < conds.Size(); ++i) { // Append all conditions using AND operator. cond = comp->gtNewOperNode(GT_AND, TYP_INT, cond, conds[i].ToGenTree(comp, newBlk, /* invert */ false)); } // Add "cond == 0" node cond = comp->gtNewOperNode(GT_EQ, TYP_INT, cond, comp->gtNewIconNode(0)); // Add jmpTrue "cond == 0" GenTree* jmpTrueTree = comp->gtNewOperNode(GT_JTRUE, TYP_VOID, cond); Statement* stmt = comp->fgNewStmtFromTree(jmpTrueTree); comp->fgInsertStmtAtEnd(newBlk, stmt); // Remorph. JITDUMP("Loop cloning condition tree before morphing:\n"); DBEXEC(comp->verbose, comp->gtDispTree(jmpTrueTree)); JITDUMP("\n"); comp->fgMorphBlockStmt(newBlk, stmt DEBUGARG("Loop cloning condition")); return newBlk; } } //-------------------------------------------------------------------------------------------------- // Lcl - the current node's local variable. // // Arguments: // None. // // Operation: // If level is 0, then just return the array base. Else return the index variable on dim 'level' // // Return Values: // The local variable in the node's level. // unsigned LC_Deref::Lcl() { unsigned lvl = level; if (lvl == 0) { return array.arrIndex->arrLcl; } lvl--; return array.arrIndex->indLcls[lvl]; } //-------------------------------------------------------------------------------------------------- // HasChildren - Check if there are children to 'this' node. // // Arguments: // None. 
// // Return Values: // Return true if children are present. // bool LC_Deref::HasChildren() { return children != nullptr && children->Size() > 0; } //-------------------------------------------------------------------------------------------------- // DeriveLevelConditions - Generate conditions for each level of the tree. // // Arguments: // conds An array of conditions for each level i.e., (level x conditions). This array will // contain the conditions for the tree at the end of the method. // // Operation: // level0 yields only (a != null) condition. All other levels yield two conditions: // (level < a[...].length && a[...][level] != null) // // Return Values: // None // void LC_Deref::DeriveLevelConditions(JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* conds) { if (level == 0) { // For level 0, just push (a != null). (*conds)[level]->Push( LC_Condition(GT_NE, LC_Expr(LC_Ident(Lcl(), LC_Ident::Var)), LC_Expr(LC_Ident(LC_Ident::Null)))); } else { // Adjust for level0 having just 1 condition and push condition (i < a.len). LC_Array arrLen = array; arrLen.oper = LC_Array::ArrLen; arrLen.dim = level - 1; (*conds)[level * 2 - 1]->Push( LC_Condition(GT_LT, LC_Expr(LC_Ident(Lcl(), LC_Ident::Var)), LC_Expr(LC_Ident(arrLen)))); // Push condition (a[i] != null) LC_Array arrTmp = array; arrTmp.dim = level; (*conds)[level * 2]->Push(LC_Condition(GT_NE, LC_Expr(LC_Ident(arrTmp)), LC_Expr(LC_Ident(LC_Ident::Null)))); } // Invoke on the children recursively. if (HasChildren()) { for (unsigned i = 0; i < children->Size(); ++i) { (*children)[i]->DeriveLevelConditions(conds); } } } //-------------------------------------------------------------------------------------------------- // EnsureChildren - Create an array of child nodes if nullptr. // // Arguments: // alloc CompAllocator instance // // Return Values: // None // void LC_Deref::EnsureChildren(CompAllocator alloc) { if (children == nullptr) { children = new (alloc) JitExpandArrayStack<LC_Deref*>(alloc); } } //-------------------------------------------------------------------------------------------------- // Find - Find the node representing the local variable in child nodes of the 'this' node. // // Arguments: // lcl the local to find in the children array // // Return Values: // The child node if found or nullptr. // LC_Deref* LC_Deref::Find(unsigned lcl) { return Find(children, lcl); } //-------------------------------------------------------------------------------------------------- // Find - Find the node representing the local variable in a list of nodes. // // Arguments: // lcl the local to find. // children the list of nodes to find the node representing the lcl. // // Return Values: // The node if found or nullptr. // // static LC_Deref* LC_Deref::Find(JitExpandArrayStack<LC_Deref*>* children, unsigned lcl) { if (children == nullptr) { return nullptr; } for (unsigned i = 0; i < children->Size(); ++i) { if ((*children)[i]->Lcl() == lcl) { return (*children)[i]; } } return nullptr; } //------------------------------------------------------------------------ // optDeriveLoopCloningConditions: Derive loop cloning conditions. // // Arguments: // loopNum - the current loop index for which conditions are derived. // context - data structure where all loop cloning info is kept. // // Return Value: // "false" if conditions cannot be obtained. "true" otherwise. // The cloning conditions are updated in the "conditions"[loopNum] field // of the "context" parameter. 
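// Example (illustrative):
//    For a loop `for (i = 0; i < n; i++) { ... a[i] ... }` with a variable limit `n`, the pushed
//    conditions are along the lines of `n >= 0` and `n <= a.len`, and a dereference of `a` is
//    recorded so that `a != null` is checked before `a.len` is read.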
// // Operation: // Inspect the loop cloning optimization candidates and populate the conditions necessary // for each optimization candidate. Checks if the loop stride is "> 0" if the loop // condition is `<` or `<=`. If the initializer is "var" init then adds condition // "var >= 0", and if the loop is var limit then, "var >= 0" and "var <= a.len" // are added to "context". These conditions are checked in the pre-header block // and the cloning choice is made. // // Assumption: // Callers should assume AND operation is used i.e., if all conditions are // true, then take the fast path. // bool Compiler::optDeriveLoopCloningConditions(unsigned loopNum, LoopCloneContext* context) { JITDUMP("------------------------------------------------------------\n"); JITDUMP("Deriving cloning conditions for " FMT_LP "\n", loopNum); LoopDsc* loop = &optLoopTable[loopNum]; JitExpandArrayStack<LcOptInfo*>* optInfos = context->GetLoopOptInfo(loopNum); if (GenTree::StaticOperIs(loop->lpTestOper(), GT_LT, GT_LE)) { // Stride conditions if (loop->lpIterConst() <= 0) { JITDUMP("> Stride %d is invalid\n", loop->lpIterConst()); return false; } // Init conditions if (loop->lpFlags & LPFLG_CONST_INIT) { // Only allowing non-negative const init at this time. // REVIEW: why? if (loop->lpConstInit < 0) { JITDUMP("> Init %d is invalid\n", loop->lpConstInit); return false; } } else if (loop->lpFlags & LPFLG_VAR_INIT) { // initVar >= 0 const unsigned initLcl = loop->lpVarInit; if (!genActualTypeIsInt(lvaGetDesc(initLcl))) { JITDUMP("> Init var V%02u not compatible with TYP_INT\n", initLcl); return false; } LC_Condition geZero(GT_GE, LC_Expr(LC_Ident(initLcl, LC_Ident::Var)), LC_Expr(LC_Ident(0, LC_Ident::Const))); context->EnsureConditions(loopNum)->Push(geZero); } else { JITDUMP("> Not variable init\n"); return false; } // Limit Conditions LC_Ident ident; if (loop->lpFlags & LPFLG_CONST_LIMIT) { int limit = loop->lpConstLimit(); if (limit < 0) { JITDUMP("> limit %d is invalid\n", limit); return false; } ident = LC_Ident(static_cast<unsigned>(limit), LC_Ident::Const); } else if (loop->lpFlags & LPFLG_VAR_LIMIT) { const unsigned limitLcl = loop->lpVarLimit(); if (!genActualTypeIsInt(lvaGetDesc(limitLcl))) { JITDUMP("> Limit var V%02u not compatible with TYP_INT\n", limitLcl); return false; } ident = LC_Ident(limitLcl, LC_Ident::Var); LC_Condition geZero(GT_GE, LC_Expr(ident), LC_Expr(LC_Ident(0, LC_Ident::Const))); context->EnsureConditions(loopNum)->Push(geZero); } else if (loop->lpFlags & LPFLG_ARRLEN_LIMIT) { ArrIndex* index = new (getAllocator(CMK_LoopClone)) ArrIndex(getAllocator(CMK_LoopClone)); if (!loop->lpArrLenLimit(this, index)) { JITDUMP("> ArrLen not matching"); return false; } ident = LC_Ident(LC_Array(LC_Array::Jagged, index, LC_Array::ArrLen)); // Ensure that this array must be dereference-able, before executing the actual condition. 
LC_Array array(LC_Array::Jagged, index, LC_Array::None); context->EnsureDerefs(loopNum)->Push(array); } else { JITDUMP("> Undetected limit\n"); return false; } // GT_LT loop test: limit <= arrLen // GT_LE loop test: limit < arrLen genTreeOps opLimitCondition; switch (loop->lpTestOper()) { case GT_LT: opLimitCondition = GT_LE; break; case GT_LE: opLimitCondition = GT_LT; break; default: unreached(); } for (unsigned i = 0; i < optInfos->Size(); ++i) { LcOptInfo* optInfo = optInfos->Get(i); switch (optInfo->GetOptType()) { case LcOptInfo::LcJaggedArray: { LcJaggedArrayOptInfo* arrIndexInfo = optInfo->AsLcJaggedArrayOptInfo(); LC_Array arrLen(LC_Array::Jagged, &arrIndexInfo->arrIndex, arrIndexInfo->dim, LC_Array::ArrLen); LC_Ident arrLenIdent = LC_Ident(arrLen); LC_Condition cond(opLimitCondition, LC_Expr(ident), LC_Expr(arrLenIdent)); context->EnsureConditions(loopNum)->Push(cond); // Ensure that this array must be dereference-able, before executing the actual condition. LC_Array array(LC_Array::Jagged, &arrIndexInfo->arrIndex, arrIndexInfo->dim, LC_Array::None); context->EnsureDerefs(loopNum)->Push(array); } break; case LcOptInfo::LcMdArray: { LcMdArrayOptInfo* mdArrInfo = optInfo->AsLcMdArrayOptInfo(); LC_Array arrLen(LC_Array(LC_Array::MdArray, mdArrInfo->GetArrIndexForDim(getAllocator(CMK_LoopClone)), mdArrInfo->dim, LC_Array::None)); LC_Ident arrLenIdent = LC_Ident(arrLen); LC_Condition cond(opLimitCondition, LC_Expr(ident), LC_Expr(arrLenIdent)); context->EnsureConditions(loopNum)->Push(cond); // TODO: ensure array is dereference-able? } break; default: JITDUMP("Unknown opt\n"); return false; } } JITDUMP("Conditions: "); DBEXEC(verbose, context->PrintConditions(loopNum)); JITDUMP("\n"); return true; } return false; } //------------------------------------------------------------------------------------ // optComputeDerefConditions: Derive loop cloning conditions for dereferencing arrays. // // Arguments: // loopNum - the current loop index for which conditions are derived. // context - data structure where all loop cloning info is kept. // // Return Value: // "false" if conditions cannot be obtained. "true" otherwise. // The deref conditions are updated in the "derefConditions"[loopNum] field // of the "context" parameter. // // Definition of Deref Conditions: // To be able to check for the loop cloning condition that (limitVar <= a.len) // we should first be able to dereference "a". i.e., "a" is non-null. // // Example: // // for (i in 0..n) // for (j in 0..n) // for (k in 0..n) // Inner most loop is being cloned. Cloning needs to check if // // (n <= a[i][j].len) and other safer conditions to take the fast path // a[i][j][k] = 0 // // Now, we want to deref a[i][j] to invoke length operator on it to perform the cloning fast path check. // This involves deref of (a), (a[i]), (a[i][j]), therefore, the following should first // be true to do the deref. // // (a != null) && (i < a.len) && (a[i] != null) && (j < a[i].len) && (a[i][j] != null) --> condition set (1) // // Note the short circuiting AND. Implication: these conditions should be performed in separate // blocks each of which will branch to slow path if the condition evaluates to false. 
// // Now, imagine a situation where, in the inner loop above, in addition to "a[i][j][k] = 0" we // also have: // a[x][y][k] = 20 // where x and y are parameters, then our conditions will have to include: // (x < a.len) && // (y < a[x].len) // in addition to the above conditions (1) to get rid of bounds check on index 'k' // // But these conditions can be checked together with conditions // (i < a.len) without a need for a separate block. In summary, the conditions will be: // // (a != null) && // ((i < a.len) & (x < a.len)) && <-- Note the bitwise AND here. // (a[i] != null & a[x] != null) && <-- Note the bitwise AND here. // (j < a[i].len & y < a[x].len) && <-- Note the bitwise AND here. // (a[i][j] != null & a[x][y] != null) <-- Note the bitwise AND here. // // This naturally yields a tree style pattern, where the nodes of the tree are // the array and indices respectively. // // Example: // a => { // i => { // j => { // k => {} // } // }, // x => { // y => { // k => {} // } // } // } // // Notice that the variables in the same levels can have their conditions combined in the // same block with a bitwise AND. Whereas, the conditions in consecutive levels will be // combined with a short-circuiting AND (i.e., different basic blocks). // // Operation: // Construct a tree of array indices and the array which will generate the optimal // conditions for loop cloning. // // a[i][j][k], b[i] and a[i][y][k] are the occurrences in the loop. Then, the tree should be: // // a => { // i => { // j => { // k => {} // }, // y => { // k => {} // }, // } // }, // b => { // i => {} // } // // In this method, we will construct such a tree by descending depth first into the array // index operation and forming a tree structure as we encounter the array or the index variables. // // This tree structure will then be used to generate conditions like below: // (a != null) & (b != null) && // from the first level of the tree. // // (i < a.len) & (i < b.len) && // from the second level of the tree. Levels can be combined. // (a[i] != null) & (b[i] != null) && // from the second level of the tree. // // (j < a[i].len) & (y < a[i].len) && // from the third level. // (a[i][j] != null) & (a[i][y] != null) && // from the third level. // // and so on. // bool Compiler::optComputeDerefConditions(unsigned loopNum, LoopCloneContext* context) { JitExpandArrayStack<LC_Deref*> nodes(getAllocator(CMK_LoopClone)); int maxRank = -1; // Get the dereference-able arrays. JitExpandArrayStack<LC_Array>* deref = context->EnsureDerefs(loopNum); // For each array in the dereference list, construct a tree, // where the nodes are array and index variables and an edge 'u-v' // exists if a node 'v' indexes node 'u' directly as in u[v] or an edge // 'u-v-w' transitively if u[v][w] occurs. for (unsigned i = 0; i < deref->Size(); ++i) { LC_Array& array = (*deref)[i]; // First populate the array base variable. LC_Deref* node = LC_Deref::Find(&nodes, array.arrIndex->arrLcl); if (node == nullptr) { node = new (getAllocator(CMK_LoopClone)) LC_Deref(array, 0 /*level*/); nodes.Push(node); } // For each dimension (level) for the array, populate the tree with the variable // from that dimension. unsigned rank = (unsigned)array.GetDimRank(); for (unsigned i = 0; i < rank; ++i) { node->EnsureChildren(getAllocator(CMK_LoopClone)); LC_Deref* tmp = node->Find(array.arrIndex->indLcls[i]); if (tmp == nullptr) { tmp = new (getAllocator(CMK_LoopClone)) LC_Deref(array, node->level + 1); node->children->Push(tmp); } // Descend one level down. 
node = tmp; } // Keep the maxRank of all array dereferences. maxRank = max((int)rank, maxRank); } #ifdef DEBUG if (verbose) { printf("Deref condition tree:\n"); for (unsigned i = 0; i < nodes.Size(); ++i) { nodes[i]->Print(); printf("\n"); } } #endif if (maxRank == -1) { JITDUMP("> maxRank undefined\n"); return false; } // First level will always yield the null-check, since it is made of the array base variables. // All other levels (dimensions) will yield two conditions ex: (i < a.length && a[i] != null) // So add 1 after rank * 2. unsigned condBlocks = (unsigned)maxRank * 2 + 1; // Heuristic to not create too many blocks. Defining as 3 allows, effectively, loop cloning on // doubly-nested loops. // REVIEW: make this based on a COMPlus configuration, at least for debug? const unsigned maxAllowedCondBlocks = 3; if (condBlocks > maxAllowedCondBlocks) { JITDUMP("> Too many condition blocks (%u > %u)\n", condBlocks, maxAllowedCondBlocks); return false; } // Derive conditions into an 'array of level x array of conditions' i.e., levelCond[levels][conds] JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* levelCond = context->EnsureBlockConditions(loopNum, condBlocks); for (unsigned i = 0; i < nodes.Size(); ++i) { nodes[i]->DeriveLevelConditions(levelCond); } DBEXEC(verbose, context->PrintBlockConditions(loopNum)); return true; } #ifdef DEBUG //---------------------------------------------------------------------------- // optDebugLogLoopCloning: Insert a call to jithelper that prints a message. // // Arguments: // block - the block in which the helper call needs to be inserted. // insertBefore - the stmt before which the helper call will be inserted. // void Compiler::optDebugLogLoopCloning(BasicBlock* block, Statement* insertBefore) { if (JitConfig.JitDebugLogLoopCloning() == 0) { return; } GenTree* logCall = gtNewHelperCallNode(CORINFO_HELP_DEBUG_LOG_LOOP_CLONING, TYP_VOID); Statement* stmt = fgNewStmtFromTree(logCall); fgInsertStmtBefore(block, insertBefore, stmt); fgMorphBlockStmt(block, stmt DEBUGARG("Debug log loop cloning")); } #endif // DEBUG //------------------------------------------------------------------------ // optPerformStaticOptimizations: Perform the optimizations for the optimization // candidates gathered during the cloning phase. // // Arguments: // loopNum - the current loop index for which the optimizations are performed. // context - data structure where all loop cloning info is kept. // dynamicPath - If true, the optimization is performed in the fast path among the // cloned loops. If false, it means this is the only path (i.e., // there is no slow path.) // // Operation: // Perform the optimizations on the fast path i.e., the path in which the // optimization candidates were collected at the time of identifying them. // The candidates store all the information necessary (the tree/stmt/block // they are from) to perform the optimization. // // Assumption: // The unoptimized path is either already cloned when this method is called or // there is no unoptimized path (got eliminated statically.) So this method // performs the optimizations assuming that the path in which the candidates // were collected is the fast path in which the optimizations will be performed. 
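// Example (illustrative):
//    For a jagged-array candidate `a[i][j] = ...` recorded with dim == 1, the bounds checks for
//    both dimensions are removed from the fast-path copy of the loop; the slow path keeps them.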
// void Compiler::optPerformStaticOptimizations(unsigned loopNum, LoopCloneContext* context DEBUGARG(bool dynamicPath)) { JitExpandArrayStack<LcOptInfo*>* optInfos = context->GetLoopOptInfo(loopNum); assert(optInfos != nullptr); for (unsigned i = 0; i < optInfos->Size(); ++i) { LcOptInfo* optInfo = optInfos->Get(i); switch (optInfo->GetOptType()) { case LcOptInfo::LcJaggedArray: { LcJaggedArrayOptInfo* arrIndexInfo = optInfo->AsLcJaggedArrayOptInfo(); compCurBB = arrIndexInfo->arrIndex.useBlock; // Remove all bounds checks for this array up to (and including) `arrIndexInfo->dim`. So, if that is 1, // Remove rank 0 and 1 bounds checks. for (unsigned dim = 0; dim <= arrIndexInfo->dim; dim++) { GenTree* bndsChkNode = arrIndexInfo->arrIndex.bndsChks[dim]; #ifdef DEBUG if (verbose) { printf("Remove bounds check "); printTreeID(bndsChkNode->gtGetOp1()); printf(" for " FMT_STMT ", dim% d, ", arrIndexInfo->stmt->GetID(), dim); arrIndexInfo->arrIndex.Print(); printf(", bounds check nodes: "); arrIndexInfo->arrIndex.PrintBoundsCheckNodes(); printf("\n"); } #endif // DEBUG if (bndsChkNode->gtGetOp1()->OperIs(GT_BOUNDS_CHECK)) { // This COMMA node will only represent a bounds check if we've haven't already removed this // bounds check in some other nesting cloned loop. For example, consider: // for (i = 0; i < x; i++) // for (j = 0; j < y; j++) // a[i][j] = i + j; // If the outer loop is cloned first, it will remove the a[i] bounds check from the optimized // path. Later, when the inner loop is cloned, we want to remove the a[i][j] bounds check. If // we clone the inner loop, we know that the a[i] bounds check isn't required because we'll add // it to the loop cloning conditions. On the other hand, we can clone a loop where we get rid of // the nested bounds check but nobody has gotten rid of the outer bounds check. As before, we // know the outer bounds check is not needed because it's been added to the cloning conditions, // so we can get rid of the bounds check here. // optRemoveCommaBasedRangeCheck(bndsChkNode, arrIndexInfo->stmt); } else { JITDUMP(" Bounds check already removed\n"); // If the bounds check node isn't there, it better have been converted to a GT_NOP. assert(bndsChkNode->gtGetOp1()->OperIs(GT_NOP)); } } DBEXEC(dynamicPath, optDebugLogLoopCloning(arrIndexInfo->arrIndex.useBlock, arrIndexInfo->stmt)); } break; case LcOptInfo::LcMdArray: // TODO-CQ: CLONE: Implement. break; default: break; } } } //---------------------------------------------------------------------------- // optIsLoopClonable: Determine whether this loop can be cloned. // // Arguments: // loopInd loop index which needs to be checked if it can be cloned. // // Return Value: // Returns true if the loop can be cloned. If it returns false, // it prints a message to the JIT dump describing why the loop can't be cloned. // // Notes: if `true` is returned, then `fgReturnCount` is increased by the number of // return blocks in the loop that will be cloned. (REVIEW: this 'predicate' function // doesn't seem like the right place to do this change.) // bool Compiler::optIsLoopClonable(unsigned loopInd) { const LoopDsc& loop = optLoopTable[loopInd]; if (!(loop.lpFlags & LPFLG_ITER)) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". No LPFLG_ITER flag.\n", loopInd); return false; } if (loop.lpFlags & LPFLG_REMOVED) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". It is marked LPFLG_REMOVED.\n", loopInd); return false; } // Make sure the loop doesn't have any embedded exception handling. 
// Walk the loop blocks from lexically first to lexically last (all blocks in this region must be // part of the loop), looking for a `try` begin block. Note that a loop must entirely contain any // EH region, or be itself entirely contained within an EH region. Thus, looking just for a `try` // begin is sufficient; there is no need to look for other EH constructs, such as a `catch` begin. // // TODO: this limitation could be removed if we do the work to insert new EH regions in the exception table, // for the cloned loop (and its embedded EH regions). // // Also, count the number of return blocks within the loop for future use. unsigned loopRetCount = 0; for (BasicBlock* const blk : loop.LoopBlocks()) { if (blk->bbJumpKind == BBJ_RETURN) { loopRetCount++; } if (bbIsTryBeg(blk)) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". It has a `try` begin.\n", loopInd); return false; } } // Is the entry block a handler or filter start? If so, then if we cloned, we could create a jump // into the middle of a handler (to go to the cloned copy.) Reject. if (bbIsHandlerBeg(loop.lpEntry)) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". Entry block is a handler start.\n", loopInd); return false; } // If the head and entry are in different EH regions, reject. if (!BasicBlock::sameEHRegion(loop.lpHead, loop.lpEntry)) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". Head and entry blocks are in different EH regions.\n", loopInd); return false; } // Is the first block after the last block of the loop a handler or filter start? // Usually, we create a dummy block after the orginal loop, to skip over the loop clone // and go to where the original loop did. That raises problems when we don't actually go to // that block; this is one of those cases. This could be fixed fairly easily; for example, // we could add a dummy nop block after the (cloned) loop bottom, in the same handler scope as the // loop. This is just a corner to cut to get this working faster. BasicBlock* bbAfterLoop = loop.lpBottom->bbNext; if (bbAfterLoop != nullptr && bbIsHandlerBeg(bbAfterLoop)) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". Next block after bottom is a handler start.\n", loopInd); return false; } // We've previously made a decision whether to have separate return epilogs, or branch to one. // There's a GCInfo limitation in the x86 case, so that there can be no more than SET_EPILOGCNT_MAX separate // epilogs. Other architectures have a limit of 4 here for "historical reasons", but this should be revisited // (or return blocks should not be considered part of the loop, rendering this issue moot). unsigned epilogLimit = 4; #ifdef JIT32_GCENCODER epilogLimit = SET_EPILOGCNT_MAX; #endif // JIT32_GCENCODER if (fgReturnCount + loopRetCount > epilogLimit) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". It has %d returns;" " if added to previously existing %d returns, it would exceed the limit of %d.\n", loopInd, loopRetCount, fgReturnCount, epilogLimit); return false; } unsigned ivLclNum = loop.lpIterVar(); if (lvaVarAddrExposed(ivLclNum)) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". Rejected V%02u as iter var because is address-exposed.\n", loopInd, ivLclNum); return false; } BasicBlock* top = loop.lpTop; BasicBlock* bottom = loop.lpBottom; if (bottom->bbJumpKind != BBJ_COND) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". Couldn't find termination test.\n", loopInd); return false; } if (bottom->bbJumpDest != top) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". 
Branch at loop 'bottom' not looping to 'top'.\n", loopInd); return false; } // TODO-CQ: CLONE: Mark increasing or decreasing loops. if ((loop.lpIterOper() != GT_ADD) || (loop.lpIterConst() != 1)) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". Loop iteration operator not matching.\n", loopInd); return false; } if ((loop.lpFlags & LPFLG_CONST_LIMIT) == 0 && (loop.lpFlags & LPFLG_VAR_LIMIT) == 0 && (loop.lpFlags & LPFLG_ARRLEN_LIMIT) == 0) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". Loop limit is neither constant, variable or array length.\n", loopInd); return false; } if (!((GenTree::StaticOperIs(loop.lpTestOper(), GT_LT, GT_LE) && (loop.lpIterOper() == GT_ADD)) || (GenTree::StaticOperIs(loop.lpTestOper(), GT_GT, GT_GE) && (loop.lpIterOper() == GT_SUB)))) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". Loop test (%s) doesn't agree with the direction (%s) of the loop.\n", loopInd, GenTree::OpName(loop.lpTestOper()), GenTree::OpName(loop.lpIterOper())); return false; } if (!loop.lpTestTree->OperIsCompare() || !(loop.lpTestTree->gtFlags & GTF_RELOP_ZTT)) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". Loop inversion NOT present, loop test [%06u] may not protect " "entry from head.\n", loopInd, loop.lpTestTree->gtTreeID); return false; } #ifdef DEBUG GenTree* op1 = loop.lpIterator(); assert((op1->gtOper == GT_LCL_VAR) && (op1->AsLclVarCommon()->GetLclNum() == ivLclNum)); #endif // Otherwise, we're going to add those return blocks. fgReturnCount += loopRetCount; return true; } //-------------------------------------------------------------------------------------------------- // optInsertLoopChoiceConditions: Insert the loop conditions for a loop after the loop head. // // Arguments: // context loop cloning context variable // loopNum the loop index // slowHead the slow path loop head, where the condition failures branch // insertAfter insert the conditions after this block // // Return Value: // The last condition block added. // // Operation: // Create the following structure. // // h (fall through) // !cond0 -?> slowHead // !cond1 -?> slowHead // ... // !condn -?> slowHead // h2/entry (fast) // ... // slowHead -?> e2 (slowHead) branch or fall-through to e2 // BasicBlock* Compiler::optInsertLoopChoiceConditions(LoopCloneContext* context, unsigned loopNum, BasicBlock* slowHead, BasicBlock* insertAfter) { JITDUMP("Inserting loop " FMT_LP " loop choice conditions\n", loopNum); assert(context->HasBlockConditions(loopNum)); assert(slowHead != nullptr); assert(insertAfter->bbJumpKind == BBJ_NONE); JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* levelCond = context->GetBlockConditions(loopNum); for (unsigned i = 0; i < levelCond->Size(); ++i) { JITDUMP("Adding loop " FMT_LP " level %u block conditions\n ", loopNum, i); DBEXEC(verbose, context->PrintBlockLevelConditions(i, (*levelCond)[i])); insertAfter = context->CondToStmtInBlock(this, *((*levelCond)[i]), slowHead, insertAfter); } // Finally insert cloning conditions after all deref conditions have been inserted. JITDUMP("Adding loop " FMT_LP " cloning conditions\n ", loopNum); DBEXEC(verbose, context->PrintConditions(loopNum)); JITDUMP("\n"); insertAfter = context->CondToStmtInBlock(this, *(context->GetConditions(loopNum)), slowHead, insertAfter); return insertAfter; } //------------------------------------------------------------------------ // OptEnsureUniqueHead: Ensure that loop "loopInd" has a unique head block. 
// If the existing entry has non-loop predecessors other than the head entry, // create a new, empty block that goes (only) to the entry, and redirects the // preds of the entry to this new block. Sets the weight of the newly created // block to "ambientWeight". // // NOTE: this is currently dead code, because it is only called by loop cloning, // and loop cloning only works with single-entry loops where the immediately // preceding head block is the only predecessor of the loop entry. // // Arguments: // loopInd - index of loop to process // ambientWeight - weight to give the new head, if created. // void Compiler::optEnsureUniqueHead(unsigned loopInd, weight_t ambientWeight) { LoopDsc& loop = optLoopTable[loopInd]; BasicBlock* h = loop.lpHead; BasicBlock* t = loop.lpTop; BasicBlock* e = loop.lpEntry; BasicBlock* b = loop.lpBottom; // If "h" dominates the entry block, then it is the unique header. if (fgDominate(h, e)) { return; } // Otherwise, create a new empty header block, make it the pred of the entry block, // and redirect the preds of the entry block to go to this. BasicBlock* beforeTop = t->bbPrev; assert(!beforeTop->bbFallsThrough() || (beforeTop->bbNext == e)); // Make sure that the new block is in the same region as the loop. // (We will only create loops that are entirely within a region.) BasicBlock* h2 = fgNewBBafter(BBJ_NONE, beforeTop, /*extendRegion*/ true); assert(beforeTop->bbNext == h2); // This is in the containing loop. h2->bbNatLoopNum = loop.lpParent; h2->bbWeight = h2->isRunRarely() ? BB_ZERO_WEIGHT : ambientWeight; if (h2->bbNext != e) { h2->bbJumpKind = BBJ_ALWAYS; h2->bbJumpDest = e; } BlockSetOps::Assign(this, h2->bbReach, e->bbReach); fgAddRefPred(e, h2); // Redirect paths from preds of "e" to go to "h2" instead of "e". BlockToBlockMap* blockMap = new (getAllocator(CMK_LoopClone)) BlockToBlockMap(getAllocator(CMK_LoopClone)); blockMap->Set(e, h2); for (BasicBlock* const predBlock : e->PredBlocks()) { // Skip if predBlock is in the loop. if (t->bbNum <= predBlock->bbNum && predBlock->bbNum <= b->bbNum) { continue; } optRedirectBlock(predBlock, blockMap); fgAddRefPred(h2, predBlock); fgRemoveRefPred(e, predBlock); } optUpdateLoopHead(loopInd, h, h2); } //------------------------------------------------------------------------ // optCloneLoop: Perform the mechanical cloning of the specified loop // // Arguments: // loopInd - loop index of loop to clone // context - data structure where all loop cloning info is kept. // void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) { assert(loopInd < optLoopCount); LoopDsc& loop = optLoopTable[loopInd]; JITDUMP("\nCloning loop " FMT_LP ": [head: " FMT_BB ", top: " FMT_BB ", entry: " FMT_BB ", bottom: " FMT_BB ", child: " FMT_LP "].\n", loopInd, loop.lpHead->bbNum, loop.lpTop->bbNum, loop.lpEntry->bbNum, loop.lpBottom->bbNum, loop.lpChild); // Determine the depth of the loop, so we can properly weight blocks added (outside the cloned loop blocks). unsigned depth = optLoopDepth(loopInd); weight_t ambientWeight = 1; for (unsigned j = 0; j < depth; j++) { weight_t lastWeight = ambientWeight; ambientWeight *= BB_LOOP_WEIGHT_SCALE; assert(ambientWeight > lastWeight); } // If we're in a non-natural loop, the ambient weight might be higher than we computed above. // Be safe by taking the max with the head block's weight. ambientWeight = max(ambientWeight, loop.lpHead->bbWeight); // We assume that the fast path will run 99% of the time, and thus should get 99% of the block weights. 
// The slow path will, correspondingly, get only 1% of the block weights. It could be argued that we should // mark the slow path as "run rarely", since it really shouldn't execute (given the currently optimized loop // conditions) except under exceptional circumstances. const weight_t fastPathWeightScaleFactor = 0.99; const weight_t slowPathWeightScaleFactor = 1.0 - fastPathWeightScaleFactor; // This is the containing loop, if any -- to label any blocks we create that are outside // the loop being cloned. unsigned char ambientLoop = loop.lpParent; // First, make sure that the loop has a unique header block, creating an empty one if necessary. optEnsureUniqueHead(loopInd, ambientWeight); // We're going to transform this loop: // // H --> E (or, H conditionally branches around the loop and has fall-through to F == T == E) // F // T // E // B ?-> T // X // // to this pair of loops: // // H ?-> H3 (all loop failure conditions branch to new slow path loop head) // H2--> E (Optional; if E == T == F, let H fall through to F/T/E) // F // T // E // B ?-> T // X2--> X // H3 --> E2 (aka slowHead. Or, H3 falls through to F2 == T2 == E2) // F2 // T2 // E2 // B2 ?-> T2 // X BasicBlock* h = loop.lpHead; if (!h->KindIs(BBJ_NONE, BBJ_ALWAYS)) { // Make a new block to be the unique entry to the loop. JITDUMP("Create new unique single-successor entry to loop\n"); assert((h->bbJumpKind == BBJ_COND) && (h->bbNext == loop.lpEntry)); BasicBlock* newH = fgNewBBafter(BBJ_NONE, h, /*extendRegion*/ true); JITDUMP("Adding " FMT_BB " after " FMT_BB "\n", newH->bbNum, h->bbNum); newH->bbWeight = newH->isRunRarely() ? BB_ZERO_WEIGHT : ambientWeight; BlockSetOps::Assign(this, newH->bbReach, h->bbReach); // This is in the scope of a surrounding loop, if one exists -- the parent of the loop we're cloning. newH->bbNatLoopNum = ambientLoop; optUpdateLoopHead(loopInd, h, newH); fgAddRefPred(newH, h); // Add h->newH pred edge JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", h->bbNum, newH->bbNum); fgReplacePred(newH->bbNext, h, newH); // Replace pred in COND fall-through block. JITDUMP("Replace " FMT_BB " -> " FMT_BB " with " FMT_BB " -> " FMT_BB "\n", h->bbNum, newH->bbNext->bbNum, newH->bbNum, newH->bbNext->bbNum); h = newH; } assert(h == loop.lpHead); // Make X2 after B, if necessary. (Not necessary if B is a BBJ_ALWAYS.) // "newPred" will be the predecessor of the blocks of the cloned loop. BasicBlock* b = loop.lpBottom; BasicBlock* newPred = b; if (b->bbJumpKind != BBJ_ALWAYS) { assert(b->bbJumpKind == BBJ_COND); BasicBlock* x = b->bbNext; if (x != nullptr) { JITDUMP("Create branch around cloned loop\n"); BasicBlock* x2 = fgNewBBafter(BBJ_ALWAYS, b, /*extendRegion*/ true); JITDUMP("Adding " FMT_BB " after " FMT_BB "\n", x2->bbNum, b->bbNum); x2->bbWeight = x2->isRunRarely() ? BB_ZERO_WEIGHT : ambientWeight; // This is in the scope of a surrounding loop, if one exists -- the parent of the loop we're cloning. x2->bbNatLoopNum = ambientLoop; x2->bbJumpDest = x; BlockSetOps::Assign(this, x2->bbReach, h->bbReach); fgAddRefPred(x2, b); // Add b->x2 pred edge JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", b->bbNum, x2->bbNum); fgReplacePred(x, b, x2); // The pred of x is now x2, not the fall-through of COND b. JITDUMP("Replace " FMT_BB " -> " FMT_BB " with " FMT_BB " -> " FMT_BB "\n", b->bbNum, x->bbNum, x2->bbNum, x->bbNum); newPred = x2; } } // We're going to create a new loop head for the slow loop immediately before the slow loop itself. All failed // conditions will branch to the slow head. 
The slow head will either fall through to the entry, or unconditionally
    // branch to the slow path entry. This puts the slow loop in the canonical loop form.
    BasicBlock* slowHeadPrev = newPred;

    // Now we'll make "h2", after "h" to go to "e" -- unless the loop is a do-while,
    // so that "h" already falls through to "e" (e == t == f).
    // It might look like this code is unreachable, since "h" must be a BBJ_ALWAYS, but
    // later we will change "h" to a BBJ_COND along with a set of loop conditions.
    // TODO: it still might be unreachable, since cloning currently is restricted to "do-while" loop forms.
    BasicBlock* h2 = nullptr;
    if (h->bbNext != loop.lpEntry)
    {
        assert(h->bbJumpKind == BBJ_ALWAYS);
        JITDUMP("Create branch to entry of optimized loop\n");
        h2 = fgNewBBafter(BBJ_ALWAYS, h, /*extendRegion*/ true);
        JITDUMP("Adding " FMT_BB " after " FMT_BB "\n", h2->bbNum, h->bbNum);
        h2->bbWeight = h2->isRunRarely() ? BB_ZERO_WEIGHT : ambientWeight;

        // This is in the scope of a surrounding loop, if one exists -- the parent of the loop we're cloning.
        h2->bbNatLoopNum = ambientLoop;

        h2->bbJumpDest = loop.lpEntry;
        fgReplacePred(loop.lpEntry, h, h2);
        JITDUMP("Replace " FMT_BB " -> " FMT_BB " with " FMT_BB " -> " FMT_BB "\n", h->bbNum, loop.lpEntry->bbNum,
                h2->bbNum, loop.lpEntry->bbNum);

        optUpdateLoopHead(loopInd, h, h2); // NOTE: 'h' is no longer the loop head; 'h2' is!
    }

    // Now we'll clone the blocks of the loop body. These cloned blocks will be the slow path.

    BasicBlock* newFirst = nullptr;

    BlockToBlockMap* blockMap = new (getAllocator(CMK_LoopClone)) BlockToBlockMap(getAllocator(CMK_LoopClone));
    for (BasicBlock* const blk : loop.LoopBlocks())
    {
        BasicBlock* newBlk = fgNewBBafter(blk->bbJumpKind, newPred, /*extendRegion*/ true);
        JITDUMP("Adding " FMT_BB " (copy of " FMT_BB ") after " FMT_BB "\n", newBlk->bbNum, blk->bbNum, newPred->bbNum);

        // Call CloneBlockState to make a copy of the block's statements (and attributes), and assert that it
        // has a return value indicating success, because optCanOptimizeByLoopCloningVisitor has already
        // checked them to guarantee they are clonable.
        bool cloneOk = BasicBlock::CloneBlockState(this, newBlk, blk);
        noway_assert(cloneOk);

        // We're going to create the preds below, which will set the bbRefs properly,
        // so clear out the cloned bbRefs field.
        newBlk->bbRefs = 0;

        newBlk->scaleBBWeight(slowPathWeightScaleFactor);
        blk->scaleBBWeight(fastPathWeightScaleFactor);

        // TODO: scale the pred edges of `blk`?

#if FEATURE_LOOP_ALIGN
        // If the original loop is aligned, do not align the cloned loop because cloned loop will be executed in
        // rare scenario. Additionally, having to align cloned loop will force us to disable some VEX prefix encoding
        // and adding compensation for over-estimated instructions.
        if (blk->isLoopAlign())
        {
            newBlk->bbFlags &= ~BBF_LOOP_ALIGN;
            JITDUMP("Removing LOOP_ALIGN flag from cloned loop in " FMT_BB "\n", newBlk->bbNum);
        }
#endif

        // TODO-Cleanup: The above clones the bbNatLoopNum, which is incorrect. Eventually, we should probably insert
        // the cloned loop in the loop table. For now, however, we'll just make these blocks be part of the surrounding
        // loop, if one exists -- the parent of the loop we're cloning.
        newBlk->bbNatLoopNum = loop.lpParent;

        if (newFirst == nullptr)
        {
            newFirst = newBlk;
        }
        newPred = newBlk;
        blockMap->Set(blk, newBlk);
    }

    // Perform the static optimizations on the fast path.
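    // (Note: for the bounds-check candidates recorded by optCanOptimizeByLoopCloningVisitor, the "static
    // optimizations" essentially rewrite the fast-path trees so the candidate bounds checks -- made
    // redundant by the cloning conditions -- can be removed; see optPerformStaticOptimizations for the
    // exact behavior.)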
optPerformStaticOptimizations(loopInd, context DEBUGARG(true)); // Now go through the new blocks, remapping their jump targets within the loop // and updating the preds lists. for (BasicBlock* const blk : loop.LoopBlocks()) { BasicBlock* newblk = nullptr; bool b = blockMap->Lookup(blk, &newblk); assert(b && newblk != nullptr); assert(blk->bbJumpKind == newblk->bbJumpKind); // First copy the jump destination(s) from "blk". optCopyBlkDest(blk, newblk); // Now redirect the new block according to "blockMap". optRedirectBlock(newblk, blockMap); // Add predecessor edges for the new successors, as well as the fall-through paths. switch (newblk->bbJumpKind) { case BBJ_NONE: fgAddRefPred(newblk->bbNext, newblk); break; case BBJ_ALWAYS: case BBJ_CALLFINALLY: fgAddRefPred(newblk->bbJumpDest, newblk); break; case BBJ_COND: fgAddRefPred(newblk->bbNext, newblk); fgAddRefPred(newblk->bbJumpDest, newblk); break; case BBJ_SWITCH: for (BasicBlock* const switchDest : newblk->SwitchTargets()) { fgAddRefPred(switchDest, newblk); } break; default: break; } } #ifdef DEBUG // Display the preds for the new blocks, after all the new blocks have been redirected. JITDUMP("Preds after loop copy:\n"); for (BasicBlock* const blk : loop.LoopBlocks()) { BasicBlock* newblk = nullptr; bool b = blockMap->Lookup(blk, &newblk); assert(b && newblk != nullptr); JITDUMP(FMT_BB ":", newblk->bbNum); for (BasicBlock* const predBlock : newblk->PredBlocks()) { JITDUMP(" " FMT_BB, predBlock->bbNum); } JITDUMP("\n"); } #endif // DEBUG // Insert the loop choice conditions. We will create the following structure: // // h (fall through) // !cond0 -?> slowHead // !cond1 -?> slowHead // ... // !condn -?> slowHead // h2/entry (fast) // ... // slowHead -?> e2 (slowHead) branch or fall-through to e2 // // We should always have block conditions; at the minimum, the array should be deref-able. assert(context->HasBlockConditions(loopInd)); if (h->bbJumpKind == BBJ_NONE) { assert(h->bbNext == loop.lpEntry); fgRemoveRefPred(h->bbNext, h); } else { assert(h->bbJumpKind == BBJ_ALWAYS); assert(h->bbJumpDest == loop.lpEntry); assert(h2 != nullptr); h->bbJumpKind = BBJ_NONE; h->bbJumpDest = nullptr; } // If any condition is false, go to slowHead (which branches or falls through to e2). BasicBlock* e2 = nullptr; bool foundIt = blockMap->Lookup(loop.lpEntry, &e2); assert(foundIt && e2 != nullptr); // Create a unique header for the slow path. JITDUMP("Create unique head block for slow path loop\n"); BasicBlock* slowHead = fgNewBBafter(BBJ_NONE, slowHeadPrev, /*extendRegion*/ true); JITDUMP("Adding " FMT_BB " after " FMT_BB "\n", slowHead->bbNum, slowHeadPrev->bbNum); slowHead->bbWeight = slowHeadPrev->isRunRarely() ? BB_ZERO_WEIGHT : ambientWeight; slowHead->scaleBBWeight(slowPathWeightScaleFactor); slowHead->bbNatLoopNum = ambientLoop; if (slowHead->bbNext != e2) { // We can't just fall through to the slow path entry, so make it an unconditional branch. slowHead->bbJumpKind = BBJ_ALWAYS; slowHead->bbJumpDest = e2; } fgAddRefPred(e2, slowHead); JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", slowHead->bbNum, e2->bbNum); BasicBlock* condLast = optInsertLoopChoiceConditions(context, loopInd, slowHead, h); // Add the fall-through path pred (either to F/T/E for fall-through from conditions to fast path, // or H2 if branch to E of fast path). assert(condLast->bbJumpKind == BBJ_COND); JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", condLast->bbNum, condLast->bbNext->bbNum); fgAddRefPred(condLast->bbNext, condLast); // If h2 is present it is already the head. 
Else, replace 'h' as the loop head by 'condLast'. if (h2 == nullptr) { optUpdateLoopHead(loopInd, loop.lpHead, condLast); } // Don't unroll loops that we've cloned -- the unroller expects any loop it should unroll to // initialize the loop counter immediately before entering the loop, but we've left a shared // initialization of the loop counter up above the test that determines which version of the // loop to take. loop.lpFlags |= LPFLG_DONT_UNROLL; } //------------------------------------------------------------------------- // optIsStackLocalInvariant: Is stack local invariant in loop. // // Arguments: // loopNum The loop in which the variable is tested for invariance. // lclNum The local that is tested for invariance in the loop. // // Return Value: // Returns true if the variable is loop invariant in loopNum. // bool Compiler::optIsStackLocalInvariant(unsigned loopNum, unsigned lclNum) { if (lvaVarAddrExposed(lclNum)) { return false; } if (optIsVarAssgLoop(loopNum, lclNum)) { return false; } return true; } //--------------------------------------------------------------------------------------------------------------- // optExtractArrIndex: Try to extract the array index from "tree". // // Arguments: // tree the tree to be checked if it is the array [] operation. // result the extracted GT_INDEX information is updated in result. // lhsNum for the root level (function is recursive) callers should pass BAD_VAR_NUM. // // Return Value: // Returns true if array index can be extracted, else, return false. See assumption about // what will be extracted. The "result" variable's rank parameter is advanced for every // dimension of [] encountered. // // Operation: // Given a "tree" extract the GT_INDEX node in "result" as ArrIndex. In morph // we have converted a GT_INDEX tree into a scaled index base offset expression. // However, we don't actually bother to parse the morphed tree. All we care about is // the bounds check node: it contains the array base and element index. The other side // of the COMMA node can vary between array of primitive type and array of struct. There's // no need to parse that, as the array bounds check contains the only thing we care about. // In particular, we are trying to find bounds checks to remove, so only looking at the bounds // check makes sense. We could verify that the bounds check is against the same array base/index // but it isn't necessary. // // Assumption: // The method extracts only if the array base and indices are GT_LCL_VAR. // // TODO-CQ: CLONE: After morph make sure this method extracts values before morph. // // Example tree to pattern match: // // * COMMA int // +--* BOUNDS_CHECK_Rng void // | +--* LCL_VAR int V02 loc1 // | \--* ARR_LENGTH int // | \--* LCL_VAR ref V00 arg0 // \--* IND int // \--* ADD byref // +--* LCL_VAR ref V00 arg0 // \--* ADD long // +--* LSH long // | +--* CAST long <- int // | | \--* LCL_VAR int V02 loc1 // | \--* CNS_INT long 2 // \--* CNS_INT long 16 Fseq[#FirstElem] // // Note that byte arrays don't require the LSH to scale the index, so look like this: // // * COMMA ubyte // +--* BOUNDS_CHECK_Rng void // | +--* LCL_VAR int V03 loc2 // | \--* ARR_LENGTH int // | \--* LCL_VAR ref V00 arg0 // \--* IND ubyte // \--* ADD byref // +--* LCL_VAR ref V00 arg0 // \--* ADD long // +--* CAST long <- int // | \--* LCL_VAR int V03 loc2 // \--* CNS_INT long 16 Fseq[#FirstElem] // // The COMMA op2 expression is the array index expression (or SIMD/Span expression). 
If we've got // a "LCL_VAR int" index and "ARR_LENGTH(LCL_VAR ref)", that's good enough for us: we'll assume // op2 is an array index expression. We don't need to match it just to ensure the index var is // used as an index expression, or array base var is used as the array base. This saves us from parsing // all the forms that morph can create, especially for arrays of structs. // bool Compiler::optExtractArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum) { if (tree->gtOper != GT_COMMA) { return false; } GenTree* before = tree->gtGetOp1(); if (!before->OperIs(GT_BOUNDS_CHECK)) { return false; } GenTreeBoundsChk* arrBndsChk = before->AsBoundsChk(); if (arrBndsChk->GetIndex()->gtOper != GT_LCL_VAR) { return false; } // For span we may see the array length is a local var or local field or constant. // We won't try and extract those. if (arrBndsChk->GetArrayLength()->OperIs(GT_LCL_VAR, GT_LCL_FLD, GT_CNS_INT)) { return false; } if (arrBndsChk->GetArrayLength()->gtGetOp1()->gtOper != GT_LCL_VAR) { return false; } unsigned arrLcl = arrBndsChk->GetArrayLength()->gtGetOp1()->AsLclVarCommon()->GetLclNum(); if (lhsNum != BAD_VAR_NUM && arrLcl != lhsNum) { return false; } unsigned indLcl = arrBndsChk->GetIndex()->AsLclVarCommon()->GetLclNum(); if (lhsNum == BAD_VAR_NUM) { result->arrLcl = arrLcl; } result->indLcls.Push(indLcl); result->bndsChks.Push(tree); result->useBlock = compCurBB; result->rank++; return true; } //--------------------------------------------------------------------------------------------------------------- // optReconstructArrIndex: Reconstruct array index. // // Arguments: // tree the tree to be checked if it is an array [][][] operation. // result OUT: the extracted GT_INDEX information. // lhsNum for the root level (function is recursive) callers should pass BAD_VAR_NUM. // // Return Value: // Returns true if array index can be extracted, else, return false. "rank" field in // "result" contains the array access depth. The "indLcls" fields contain the indices. // // Operation: // Recursively look for a list of array indices. For example, if the tree is // V03 = (V05 = V00[V01]), V05[V02] // that corresponds to access of V00[V01][V02]. The return value would then be: // ArrIndex result { arrLcl: V00, indLcls: [V01, V02], rank: 2 } // // Note that the array expression is implied by the array bounds check under the COMMA, and the array bounds // checks is what is parsed from the morphed tree; the array addressing expression is not parsed. // // Assumption: // The method extracts only if the array base and indices are GT_LCL_VAR. // bool Compiler::optReconstructArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum) { // If we can extract "tree" (which is a top level comma) return. if (optExtractArrIndex(tree, result, lhsNum)) { return true; } // We have a comma (check if array base expr is computed in "before"), descend further. else if (tree->OperGet() == GT_COMMA) { GenTree* before = tree->gtGetOp1(); // "before" should evaluate an array base for the "after" indexing. if (before->OperGet() != GT_ASG) { return false; } GenTree* lhs = before->gtGetOp1(); GenTree* rhs = before->gtGetOp2(); // "rhs" should contain an GT_INDEX if (!lhs->IsLocal() || !optReconstructArrIndex(rhs, result, lhsNum)) { return false; } unsigned lhsNum = lhs->AsLclVarCommon()->GetLclNum(); GenTree* after = tree->gtGetOp2(); // Pass the "lhsNum", so we can verify if indeed it is used as the array base. 
return optExtractArrIndex(after, result, lhsNum); } return false; } //---------------------------------------------------------------------------------------------- // optCanOptimizeByLoopCloning: Check if the tree can be optimized by loop cloning and if so, // identify as potential candidate and update the loop context. // // Arguments: // tree The tree encountered during the tree walk. // info Supplies information about the current block or stmt in which the tree is. // Also supplies the "context" pointer for updating with loop cloning // candidates. Also supplies loopNum. // // Operation: // If array index can be reconstructed, check if the iteration var of the loop matches the // array index var in some dimension. Also ensure other index vars before the identified // dimension are loop invariant. // // Return Value: // Skip sub trees if the optimization candidate is identified or else continue walking // Compiler::fgWalkResult Compiler::optCanOptimizeByLoopCloning(GenTree* tree, LoopCloneVisitorInfo* info) { ArrIndex arrIndex(getAllocator(CMK_LoopClone)); // Check if array index can be optimized. if (optReconstructArrIndex(tree, &arrIndex, BAD_VAR_NUM)) { assert(tree->gtOper == GT_COMMA); #ifdef DEBUG if (verbose) { printf("Found ArrIndex at " FMT_BB " " FMT_STMT " tree ", arrIndex.useBlock->bbNum, info->stmt->GetID()); printTreeID(tree); printf(" which is equivalent to: "); arrIndex.Print(); printf(", bounds check nodes: "); arrIndex.PrintBoundsCheckNodes(); printf("\n"); } #endif // Check that the array object local variable is invariant within the loop body. if (!optIsStackLocalInvariant(info->loopNum, arrIndex.arrLcl)) { JITDUMP("V%02d is not loop invariant\n", arrIndex.arrLcl); return WALK_SKIP_SUBTREES; } // Walk the dimensions and see if iterVar of the loop is used as index. for (unsigned dim = 0; dim < arrIndex.rank; ++dim) { // Is index variable also used as the loop iter var? if (arrIndex.indLcls[dim] == optLoopTable[info->loopNum].lpIterVar()) { // Check the previous indices are all loop invariant. for (unsigned dim2 = 0; dim2 < dim; ++dim2) { if (optIsVarAssgLoop(info->loopNum, arrIndex.indLcls[dim2])) { JITDUMP("V%02d is assigned in loop\n", arrIndex.indLcls[dim2]); return WALK_SKIP_SUBTREES; } } #ifdef DEBUG if (verbose) { printf("Loop " FMT_LP " can be cloned for ArrIndex ", info->loopNum); arrIndex.Print(); printf(" on dim %d\n", dim); } #endif // Update the loop context. info->context->EnsureLoopOptInfo(info->loopNum) ->Push(new (this, CMK_LoopOpt) LcJaggedArrayOptInfo(arrIndex, dim, info->stmt)); } else { JITDUMP("Induction V%02d is not used as index on dim %d\n", optLoopTable[info->loopNum].lpIterVar(), dim); } } return WALK_SKIP_SUBTREES; } else if (tree->gtOper == GT_ARR_ELEM) { // TODO-CQ: CLONE: Implement. return WALK_SKIP_SUBTREES; } return WALK_CONTINUE; } /* static */ Compiler::fgWalkResult Compiler::optCanOptimizeByLoopCloningVisitor(GenTree** pTree, Compiler::fgWalkData* data) { return data->compiler->optCanOptimizeByLoopCloning(*pTree, (LoopCloneVisitorInfo*)data->pCallbackData); } //------------------------------------------------------------------------ // optIdentifyLoopOptInfo: Identify loop optimization candidates. // Also, check if the loop is suitable for the optimizations performed. // // Arguments: // loopNum - the current loop index for which conditions are derived. // context - data structure where all loop cloning candidates will be updated. 
// // Return Value: // If the loop is not suitable for the optimizations, return false - context // should not contain any optimization candidate for the loop if false. // Else return true. // // Operation: // Check if the loop is well formed for this optimization and identify the // optimization candidates and update the "context" parameter with all the // contextual information necessary to perform the optimization later. // bool Compiler::optIdentifyLoopOptInfo(unsigned loopNum, LoopCloneContext* context) { JITDUMP("Checking loop " FMT_LP " for optimization candidates\n", loopNum); const LoopDsc& loop = optLoopTable[loopNum]; LoopCloneVisitorInfo info(context, loopNum, nullptr); for (BasicBlock* const block : loop.LoopBlocks()) { compCurBB = block; for (Statement* const stmt : block->Statements()) { info.stmt = stmt; const bool lclVarsOnly = false; const bool computeStack = false; fgWalkTreePre(stmt->GetRootNodePointer(), optCanOptimizeByLoopCloningVisitor, &info, lclVarsOnly, computeStack); } } return true; } //------------------------------------------------------------------------------ // optObtainLoopCloningOpts: Identify optimization candidates and update // the "context" for array optimizations. // // Arguments: // context - data structure where all loop cloning info is kept. The // optInfo fields of the context are updated with the // identified optimization candidates. // // Returns: // true if there are any clonable loops. // bool Compiler::optObtainLoopCloningOpts(LoopCloneContext* context) { bool result = false; for (unsigned i = 0; i < optLoopCount; i++) { JITDUMP("Considering loop " FMT_LP " to clone for optimizations.\n", i); if (optIsLoopClonable(i)) { if (optIdentifyLoopOptInfo(i, context)) { result = true; } } JITDUMP("------------------------------------------------------------\n"); } JITDUMP("\n"); return result; } //---------------------------------------------------------------------------- // optLoopCloningEnabled: Determine whether loop cloning is allowed. It is allowed // in release builds. For debug builds, use the value of the COMPlus_JitCloneLoops // flag (which defaults to 1, or allowed). // // Return Value: // true if loop cloning is allowed, false if disallowed. // bool Compiler::optLoopCloningEnabled() { #ifdef DEBUG return JitConfig.JitCloneLoops() != 0; #else return true; #endif } //------------------------------------------------------------------------ // optCloneLoops: Implements loop cloning optimization. // // Identify loop cloning opportunities, derive loop cloning conditions, // perform loop cloning, use the derived conditions to choose which // path to take. // // Returns: // suitable phase status // PhaseStatus Compiler::optCloneLoops() { JITDUMP("\n*************** In optCloneLoops()\n"); if (optLoopCount == 0) { JITDUMP(" No loops to clone\n"); return PhaseStatus::MODIFIED_NOTHING; } if (!optLoopCloningEnabled()) { JITDUMP(" Loop cloning disabled\n"); return PhaseStatus::MODIFIED_NOTHING; } #ifdef DEBUG if (verbose) { printf("\nBefore loop cloning:\n"); fgDispBasicBlocks(/*dumpTrees*/ true); } #endif LoopCloneContext context(optLoopCount, getAllocator(CMK_LoopClone)); // Obtain array optimization candidates in the context. 
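    // (The candidates are per-loop LcOptInfo entries -- currently jagged-array bounds-check candidates
    // recorded by optCanOptimizeByLoopCloning; multi-dimensional array candidates remain a TODO there --
    // and they are what the cloning conditions below are derived from.)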
if (!optObtainLoopCloningOpts(&context)) { JITDUMP(" No clonable loops\n"); // TODO: if we can verify that the IR was not modified, we can return PhaseStatus::MODIFIED_NOTHING return PhaseStatus::MODIFIED_EVERYTHING; } unsigned optStaticallyOptimizedLoops = 0; // For each loop, derive cloning conditions for the optimization candidates. for (unsigned i = 0; i < optLoopCount; ++i) { JitExpandArrayStack<LcOptInfo*>* optInfos = context.GetLoopOptInfo(i); if (optInfos == nullptr) { continue; } if (!optDeriveLoopCloningConditions(i, &context) || !optComputeDerefConditions(i, &context)) { JITDUMP("> Conditions could not be obtained\n"); context.CancelLoopOptInfo(i); } else { bool allTrue = false; bool anyFalse = false; context.EvaluateConditions(i, &allTrue, &anyFalse DEBUGARG(verbose)); if (anyFalse) { context.CancelLoopOptInfo(i); } else if (allTrue) { // Perform static optimizations on the fast path since we always // have to take the cloned path. optPerformStaticOptimizations(i, &context DEBUGARG(false)); ++optStaticallyOptimizedLoops; // No need to clone. context.CancelLoopOptInfo(i); } } } #if 0 // The code in this #if has been useful in debugging loop cloning issues, by // enabling selective enablement of the loop cloning optimization according to // method hash. #ifdef DEBUG unsigned methHash = info.compMethodHash(); char* lostr = getenv("loopclonehashlo"); unsigned methHashLo = 0; if (lostr != NULL) { sscanf_s(lostr, "%x", &methHashLo); // methHashLo = (unsigned(atoi(lostr)) << 2); // So we don't have to use negative numbers. } char* histr = getenv("loopclonehashhi"); unsigned methHashHi = UINT32_MAX; if (histr != NULL) { sscanf_s(histr, "%x", &methHashHi); // methHashHi = (unsigned(atoi(histr)) << 2); // So we don't have to use negative numbers. } if (methHash < methHashLo || methHash > methHashHi) { return PhaseStatus::MODIFIED_EVERYTHING; } #endif #endif assert(optLoopsCloned == 0); // It should be initialized, but not yet changed. for (unsigned i = 0; i < optLoopCount; ++i) { if (context.GetLoopOptInfo(i) != nullptr) { optLoopsCloned++; context.OptimizeConditions(i DEBUGARG(verbose)); context.OptimizeBlockConditions(i DEBUGARG(verbose)); optCloneLoop(i, &context); } } if (optLoopsCloned > 0) { JITDUMP("Recompute reachability and dominators after loop cloning\n"); constexpr bool computePreds = false; // TODO: recompute the loop table, to include the slow loop path in the table? fgUpdateChangedFlowGraph(computePreds); } #ifdef DEBUG if (verbose) { printf("Loops cloned: %d\n", optLoopsCloned); printf("Loops statically optimized: %d\n", optStaticallyOptimizedLoops); printf("After loop cloning:\n"); fgDispBasicBlocks(/*dumpTrees*/ true); } fgDebugCheckLoopTable(); #endif return PhaseStatus::MODIFIED_EVERYTHING; }
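
// Illustrative sketch: for a simple counted loop such as
//
//     for (i = 0; i < n; i++) { sum += a[i]; }
//
// the cloning performed by optCloneLoops/optCloneLoop conceptually produces
//
//     if ((a != null) && (n <= a.len))      // plus (n >= 0) when the limit is a variable
//     {
//         for (i = 0; i < n; i++) { sum += a[i]; }    // fast path: candidate bounds checks removed
//     }
//     else
//     {
//         for (i = 0; i < n; i++) { sum += a[i]; }    // slow path: cloned loop, checks retained
//     }
//
// where "a.len" follows the notation used in the comments above, (a != null) comes from the deref
// conditions, and (n <= a.len) is the limit condition derived for a "<" loop test.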
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX LoopCloning XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef DEBUG //-------------------------------------------------------------------------------------------------- // ArrIndex::Print - debug print an ArrIndex struct in form: `V01[V02][V03]`. // // Arguments: // dim (Optional) Print up to but not including this dimension. Default: print all dimensions. // void ArrIndex::Print(unsigned dim /* = -1 */) { printf("V%02d", arrLcl); for (unsigned i = 0; i < ((dim == (unsigned)-1) ? rank : dim); ++i) { printf("[V%02d]", indLcls.Get(i)); } } //-------------------------------------------------------------------------------------------------- // ArrIndex::PrintBoundsCheckNodes - debug print an ArrIndex struct bounds check node tree ids in // form: `[000125][000113]`. // // Arguments: // dim (Optional) Print up to but not including this dimension. Default: print all dimensions. // void ArrIndex::PrintBoundsCheckNodes(unsigned dim /* = -1 */) { for (unsigned i = 0; i < ((dim == (unsigned)-1) ? rank : dim); ++i) { Compiler::printTreeID(bndsChks.Get(i)); } } #endif // DEBUG //-------------------------------------------------------------------------------------------------- // ToGenTree - Convert an arrLen operation into a gentree node. // // Arguments: // comp Compiler instance to allocate trees // bb Basic block of the new tree // // Return Values: // Returns the gen tree representation for arrLen or MD Array node as defined by // the "type" member // // Notes: // This tree produces GT_INDEX node, the caller is supposed to morph it appropriately // so it can be codegen'ed. // GenTree* LC_Array::ToGenTree(Compiler* comp, BasicBlock* bb) { // If jagged array if (type == Jagged) { // Create a a[i][j][k].length type node. GenTree* arr = comp->gtNewLclvNode(arrIndex->arrLcl, comp->lvaTable[arrIndex->arrLcl].lvType); int rank = GetDimRank(); for (int i = 0; i < rank; ++i) { arr = comp->gtNewIndexRef(TYP_REF, arr, comp->gtNewLclvNode(arrIndex->indLcls[i], comp->lvaTable[arrIndex->indLcls[i]].lvType)); // Clear the range check flag and mark the index as non-faulting: we guarantee that all necessary range // checking has already been done by the time this array index expression is invoked. arr->gtFlags &= ~(GTF_INX_RNGCHK | GTF_EXCEPT); arr->gtFlags |= GTF_INX_NOFAULT; } // If asked for arrlen invoke arr length operator. if (oper == ArrLen) { GenTree* arrLen = comp->gtNewArrLen(TYP_INT, arr, OFFSETOF__CORINFO_Array__length, bb); // We already guaranteed (by a sequence of preceding checks) that the array length operator will not // throw an exception because we null checked the base array. // So, we should be able to do the following: // arrLen->gtFlags &= ~GTF_EXCEPT; // arrLen->gtFlags |= GTF_IND_NONFAULTING; // However, we then end up with a mix of non-faulting array length operators as well as normal faulting // array length operators in the slow-path of the cloned loops. CSE doesn't keep these separate, so bails // out on creating CSEs on this very useful type of CSE, leading to CQ losses in the cloned loop fast path. // TODO-CQ: fix this. 
return arrLen; } else { assert(oper == None); return arr; } } else { // TODO-CQ: Optimize for MD Array. assert(!"Optimize for MD Array"); } return nullptr; } //-------------------------------------------------------------------------------------------------- // ToGenTree - Convert an "identifier" into a gentree node. // // Arguments: // comp Compiler instance to allocate trees // bb Basic block of the new tree // // Return Values: // Returns the gen tree representation for either a constant or a variable or an arrLen operation // defined by the "type" member // GenTree* LC_Ident::ToGenTree(Compiler* comp, BasicBlock* bb) { // Convert to GenTree nodes. switch (type) { case Const: assert(constant <= INT32_MAX); return comp->gtNewIconNode(constant); case Var: return comp->gtNewLclvNode(constant, comp->lvaTable[constant].lvType); case ArrLen: return arrLen.ToGenTree(comp, bb); case Null: return comp->gtNewIconNode(0, TYP_REF); default: assert(!"Could not convert LC_Ident to GenTree"); unreached(); break; } } //-------------------------------------------------------------------------------------------------- // ToGenTree - Convert an "expression" into a gentree node. // // Arguments: // comp Compiler instance to allocate trees // bb Basic block of the new tree // // Return Values: // Returns the gen tree representation for either a constant or a variable or an arrLen operation // defined by the "type" member // GenTree* LC_Expr::ToGenTree(Compiler* comp, BasicBlock* bb) { // Convert to GenTree nodes. switch (type) { case Ident: return ident.ToGenTree(comp, bb); default: assert(!"Could not convert LC_Expr to GenTree"); unreached(); break; } } //-------------------------------------------------------------------------------------------------- // ToGenTree - Convert a "condition" into a gentree node. // // Arguments: // comp Compiler instance to allocate trees // bb Basic block of the new tree // invert `true` if the condition should be inverted // // Return Values: // Returns the GenTree representation for the conditional operator on lhs and rhs trees // GenTree* LC_Condition::ToGenTree(Compiler* comp, BasicBlock* bb, bool invert) { GenTree* op1Tree = op1.ToGenTree(comp, bb); GenTree* op2Tree = op2.ToGenTree(comp, bb); assert(genTypeSize(genActualType(op1Tree->TypeGet())) == genTypeSize(genActualType(op2Tree->TypeGet()))); return comp->gtNewOperNode(invert ? GenTree::ReverseRelop(oper) : oper, TYP_INT, op1Tree, op2Tree); } //-------------------------------------------------------------------------------------------------- // Evaluates - Evaluate a given loop cloning condition if it can be statically evaluated. // // Arguments: // pResult OUT parameter. The evaluation result // // Return Values: // Returns true if the condition can be statically evaluated. If the condition's result // is statically unknown then return false. In other words, `*pResult` is valid only if the // function returns true. // bool LC_Condition::Evaluates(bool* pResult) { switch (oper) { case GT_EQ: case GT_GE: case GT_LE: // If op1 == op2 then equality should result in true. if (op1 == op2) { *pResult = true; return true; } break; case GT_GT: case GT_LT: case GT_NE: // If op1 == op2 then inequality should result in false. 
if (op1 == op2) { *pResult = false; return true; } break; default: // for all other 'oper' kinds, we will return false break; } return false; } //-------------------------------------------------------------------------------------------------- // Combines - Check whether two conditions would combine to yield a single new condition. // // Arguments: // cond The condition that is checked if it would combine with "*this" condition. // newCond The resulting combined condition. // // Return Values: // Returns true if "cond" combines with the "this" condition. // "newCond" contains the combines condition. // // Operation: // Check if both conditions are equal. If so, return just 1 of them. // Reverse their operators and check if their reversed operands match. If so, return either of them. // // Notes: // This is not a full-fledged expression optimizer, it is supposed // to remove redundant conditions that are generated for optimization // opportunities. Anything further should be implemented as needed. // For example, for (i = beg; i < end; i += inc) a[i]. Then, the conditions // would be: "beg >= 0, end <= a.len, inc > 0" bool LC_Condition::Combines(const LC_Condition& cond, LC_Condition* newCond) { if (oper == cond.oper && op1 == cond.op1 && op2 == cond.op2) { *newCond = *this; return true; } else if ((oper == GT_LT || oper == GT_LE || oper == GT_GT || oper == GT_GE) && GenTree::ReverseRelop(oper) == cond.oper && op1 == cond.op2 && op2 == cond.op1) { *newCond = *this; return true; } return false; } //-------------------------------------------------------------------------------------------------- // GetLoopOptInfo - Retrieve the loop opt info candidate array. // // Arguments: // loopNum the loop index. // // Return Values: // Return the optInfo array member. The method doesn't allocate memory. // JitExpandArrayStack<LcOptInfo*>* LoopCloneContext::GetLoopOptInfo(unsigned loopNum) { return optInfo[loopNum]; } //-------------------------------------------------------------------------------------------------- // CancelLoopOptInfo - Cancel loop cloning optimization for this loop. // // Arguments: // loopNum the loop index. // // Return Values: // None. // void LoopCloneContext::CancelLoopOptInfo(unsigned loopNum) { JITDUMP("Cancelling loop cloning for loop " FMT_LP "\n", loopNum); optInfo[loopNum] = nullptr; if (conditions[loopNum] != nullptr) { conditions[loopNum]->Reset(); conditions[loopNum] = nullptr; } } //-------------------------------------------------------------------------------------------------- // EnsureLoopOptInfo - Retrieve the loop opt info candidate array, if it is not present, allocate // memory. // // Arguments: // loopNum the loop index. // // Return Values: // The array of optimization candidates for the loop. // JitExpandArrayStack<LcOptInfo*>* LoopCloneContext::EnsureLoopOptInfo(unsigned loopNum) { if (optInfo[loopNum] == nullptr) { optInfo[loopNum] = new (alloc) JitExpandArrayStack<LcOptInfo*>(alloc, 4); } return optInfo[loopNum]; } //-------------------------------------------------------------------------------------------------- // EnsureLoopOptInfo - Retrieve the loop cloning conditions candidate array, // if it is not present, allocate memory. // // Arguments: // loopNum the loop index. // // Return Values: // The array of cloning conditions for the loop. 
// JitExpandArrayStack<LC_Condition>* LoopCloneContext::EnsureConditions(unsigned loopNum) { if (conditions[loopNum] == nullptr) { conditions[loopNum] = new (alloc) JitExpandArrayStack<LC_Condition>(alloc, 4); } return conditions[loopNum]; } //-------------------------------------------------------------------------------------------------- // GetConditions - Get the cloning conditions array for the loop, no allocation. // // Arguments: // loopNum the loop index. // // Return Values: // The array of cloning conditions for the loop. // JitExpandArrayStack<LC_Condition>* LoopCloneContext::GetConditions(unsigned loopNum) { return conditions[loopNum]; } //-------------------------------------------------------------------------------------------------- // EnsureDerefs - Ensure an array of dereferences is created if it doesn't exist. // // Arguments: // loopNum the loop index. // // Return Values: // The array of dereferences for the loop. // JitExpandArrayStack<LC_Array>* LoopCloneContext::EnsureDerefs(unsigned loopNum) { if (derefs[loopNum] == nullptr) { derefs[loopNum] = new (alloc) JitExpandArrayStack<LC_Array>(alloc, 4); } return derefs[loopNum]; } //-------------------------------------------------------------------------------------------------- // HasBlockConditions - Check if there are block level conditions for the loop. // // Arguments: // loopNum the loop index. // // Return Values: // Return true if there are any block level conditions. // bool LoopCloneContext::HasBlockConditions(unsigned loopNum) { JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* levelCond = blockConditions[loopNum]; if (levelCond == nullptr) { return false; } // Walk through each block to check if any of them has conditions. for (unsigned i = 0; i < levelCond->Size(); ++i) { if ((*levelCond)[i]->Size() > 0) { return true; } } return false; } //-------------------------------------------------------------------------------------------------- // GetBlockConditions - Return block level conditions for the loop. // // Arguments: // loopNum the loop index. // // Return Values: // Return block conditions. // JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* LoopCloneContext::GetBlockConditions(unsigned loopNum) { assert(HasBlockConditions(loopNum)); return blockConditions[loopNum]; } //-------------------------------------------------------------------------------------------------- // EnsureBlockConditions - Allocate block level conditions for the loop if not exists. // // Arguments: // loopNum the loop index. // condBlocks the number of block-level conditions for each loop, corresponding to the blocks // created. // // Return Values: // Return block conditions. 
// JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* LoopCloneContext::EnsureBlockConditions(unsigned loopNum, unsigned condBlocks) { if (blockConditions[loopNum] == nullptr) { blockConditions[loopNum] = new (alloc) JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>(alloc, condBlocks); } JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* levelCond = blockConditions[loopNum]; for (unsigned i = 0; i < condBlocks; ++i) { levelCond->Set(i, new (alloc) JitExpandArrayStack<LC_Condition>(alloc)); } return levelCond; } #ifdef DEBUG void LoopCloneContext::PrintBlockConditions(unsigned loopNum) { printf("Block conditions:\n"); JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* blockConds = blockConditions[loopNum]; if (blockConds == nullptr || blockConds->Size() == 0) { printf("No block conditions\n"); return; } for (unsigned i = 0; i < blockConds->Size(); ++i) { PrintBlockLevelConditions(i, (*blockConds)[i]); } } void LoopCloneContext::PrintBlockLevelConditions(unsigned level, JitExpandArrayStack<LC_Condition>* levelCond) { printf("%d = ", level); for (unsigned j = 0; j < levelCond->Size(); ++j) { if (j != 0) { printf(" && "); } printf("("); (*levelCond)[j].Print(); printf(")"); } printf("\n"); } #endif //-------------------------------------------------------------------------------------------------- // EvaluateConditions - Evaluate the loop cloning conditions statically, if they can be evaluated. // // Arguments: // loopNum the loop index. // pAllTrue OUT parameter. `*pAllTrue` is set to `true` if all the cloning conditions statically // evaluate to true. // pAnyFalse OUT parameter. `*pAnyFalse` is set to `true` if some cloning condition statically // evaluate to false. // verbose verbose logging required. // // Return Values: // None. // // Operation: // For example, a condition like "V02 >= V02" statically evaluates to true. Caller should detect such // conditions and remove them from the "conditions" array. // // Similarly, conditions like "V02 > V02" will evaluate to "false". In this case caller has to abort // loop cloning optimization for the loop. Note that the assumption for conditions is that they will // all be "AND"ed, so statically we know we will never take the fast path. // // Sometimes we simply can't say statically whether "V02 > V01.length" is true or false. // In that case, `*pAllTrue` will be false because this condition doesn't evaluate to "true" and // `*pAnyFalse` could be false if no other condition statically evaluates to "false". // // If `*pAnyFalse` is true, we set that and return, and `*pAllTrue` is not accurate, since the loop cloning // needs to be aborted. // void LoopCloneContext::EvaluateConditions(unsigned loopNum, bool* pAllTrue, bool* pAnyFalse DEBUGARG(bool verbose)) { bool allTrue = true; bool anyFalse = false; JitExpandArrayStack<LC_Condition>& conds = *conditions[loopNum]; JITDUMP("Evaluating %d loop cloning conditions for loop " FMT_LP "\n", conds.Size(), loopNum); assert(conds.Size() > 0); for (unsigned i = 0; i < conds.Size(); ++i) { #ifdef DEBUG if (verbose) { printf("Considering condition %d: (", i); conds[i].Print(); } #endif bool res = false; // Check if this condition evaluates to true or false. if (conds[i].Evaluates(&res)) { JITDUMP(") evaluates to %s\n", dspBool(res)); if (!res) { anyFalse = true; // Since this will force us to abort loop cloning, there is no need compute an accurate `allTrue`, // so we can break out of the loop now. 
break; } } else { JITDUMP("), could not be evaluated\n"); allTrue = false; } } JITDUMP("Evaluation result allTrue = %s, anyFalse = %s\n", dspBool(allTrue), dspBool(anyFalse)); *pAllTrue = allTrue; *pAnyFalse = anyFalse; } //-------------------------------------------------------------------------------------------------- // OptimizeConditions - Evaluate the loop cloning conditions statically, if they can be evaluated // then optimize the "conditions" array accordingly. // // Arguments: // conds The conditions array to optimize. // // Return Values: // None. // // Operation: // For example, a condition like "V02 >= V02" statically evaluates to true. Remove such conditions // from the "conditions" array. // // Similarly, conditions like "V02 > V02" will evaluate to "false". In this case abort loop cloning // optimization for the loop. // // Sometimes, two conditions will combine together to yield a single condition, then remove a // duplicate condition. void LoopCloneContext::OptimizeConditions(JitExpandArrayStack<LC_Condition>& conds) { for (unsigned i = 0; i < conds.Size(); ++i) { // Check if the conditions evaluate. bool result = false; if (conds[i].Evaluates(&result)) { // If statically known to be true, then remove this condition. if (result) { conds.Remove(i); --i; continue; } else { // Some condition is statically false, then simply indicate // not to clone this loop. CancelLoopOptInfo(i); break; } } // Check for all other conditions[j], if it would combine with // conditions[i]. for (unsigned j = i + 1; j < conds.Size(); ++j) { LC_Condition newCond; if (conds[i].Combines(conds[j], &newCond)) { conds.Remove(j); conds[i] = newCond; i = -1; break; } } } #ifdef DEBUG // Make sure we didn't miss some combining. for (unsigned i = 0; i < conds.Size(); ++i) { for (unsigned j = 0; j < conds.Size(); ++j) { LC_Condition newCond; if ((i != j) && conds[i].Combines(conds[j], &newCond)) { assert(!"Loop cloning conditions can still be optimized further."); } } } #endif } //-------------------------------------------------------------------------------------------------- // OptimizeBlockConditions - Optimize block level conditions. // // Arguments: // loopNum the loop index. // // Operation: // Calls OptimizeConditions helper on block level conditions. // // Return Values: // None. // void LoopCloneContext::OptimizeBlockConditions(unsigned loopNum DEBUGARG(bool verbose)) { if (!HasBlockConditions(loopNum)) { return; } JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* levelCond = blockConditions[loopNum]; for (unsigned i = 0; i < levelCond->Size(); ++i) { OptimizeConditions(*((*levelCond)[i])); } #ifdef DEBUG if (verbose) { printf("After optimizing block-level cloning conditions\n\t"); PrintConditions(loopNum); printf("\n"); } #endif } //-------------------------------------------------------------------------------------------------- // OptimizeConditions - Optimize cloning conditions. // // Arguments: // loopNum the loop index. // verbose verbose logging required. // // Operation: // Calls OptimizeConditions helper on cloning conditions. // // Return Values: // None. 
// void LoopCloneContext::OptimizeConditions(unsigned loopNum DEBUGARG(bool verbose)) { #ifdef DEBUG if (verbose) { printf("Before optimizing cloning conditions\n\t"); PrintConditions(loopNum); printf("\n"); } #endif JitExpandArrayStack<LC_Condition>& conds = *conditions[loopNum]; OptimizeConditions(conds); #ifdef DEBUG if (verbose) { printf("After optimizing cloning conditions\n\t"); PrintConditions(loopNum); printf("\n"); } #endif } #ifdef DEBUG //-------------------------------------------------------------------------------------------------- // PrintConditions - Print loop cloning conditions necessary to clone the loop. // // Arguments: // loopNum the loop index. // // Return Values: // None. // void LoopCloneContext::PrintConditions(unsigned loopNum) { if (conditions[loopNum] == nullptr) { printf("NO conditions"); return; } if (conditions[loopNum]->Size() == 0) { printf("Conditions were optimized away! Will always take cloned path."); return; } for (unsigned i = 0; i < conditions[loopNum]->Size(); ++i) { if (i != 0) { printf(" && "); } printf("("); (*conditions[loopNum])[i].Print(); printf(")"); } } #endif //-------------------------------------------------------------------------------------------------- // CondToStmtInBlock: Convert an array of conditions to IR. Evaluate them into a JTRUE stmt and add it to // a new block after `insertAfter`. // // Arguments: // comp Compiler instance // conds Array of conditions to evaluate into a JTRUE stmt // slowHead Branch here on condition failure // insertAfter Insert the conditions in a block after this block // // Notes: // If any condition fails, branch to the `slowHead` block. There are two options here: // 1. Generate all the conditions in a single block using bitwise `&` to merge them, e.g.: // jmpTrue(cond1 & cond2 ... == 0) => slowHead // In this form, we always execute all the conditions (there is no short-circuit evaluation). // Since we expect that in the usual case all the conditions will fail, and we'll execute the // loop fast path, the lack of short-circuit evaluation is not a problem. If the code is smaller // and faster, this would be preferable. // 2. Generate each condition in a separate block, e.g.: // jmpTrue(!cond1) => slowHead // jmpTrue(!cond2) => slowHead // ... // If this code is smaller/faster, this can be preferable. Also, the flow graph is more normal, // and amenable to downstream flow optimizations. // // Which option we choose is currently compile-time determined. // // We assume that `insertAfter` is a fall-through block, and we add it to the predecessors list // of the first newly added block. `insertAfter` is also assumed to be in the same loop (we can // clone its loop number). 
// // Return Value: // Last block added // BasicBlock* LoopCloneContext::CondToStmtInBlock(Compiler* comp, JitExpandArrayStack<LC_Condition>& conds, BasicBlock* slowHead, BasicBlock* insertAfter) { noway_assert(conds.Size() > 0); assert(slowHead != nullptr); assert(insertAfter->KindIs(BBJ_NONE, BBJ_COND)); // Choose how to generate the conditions const bool generateOneConditionPerBlock = true; if (generateOneConditionPerBlock) { BasicBlock* newBlk = nullptr; for (unsigned i = 0; i < conds.Size(); ++i) { newBlk = comp->fgNewBBafter(BBJ_COND, insertAfter, /*extendRegion*/ true); newBlk->inheritWeight(insertAfter); newBlk->bbNatLoopNum = insertAfter->bbNatLoopNum; newBlk->bbJumpDest = slowHead; JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", newBlk->bbNum, newBlk->bbJumpDest->bbNum); comp->fgAddRefPred(newBlk->bbJumpDest, newBlk); JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", insertAfter->bbNum, newBlk->bbNum); comp->fgAddRefPred(newBlk, insertAfter); JITDUMP("Adding conditions %u to " FMT_BB "\n", i, newBlk->bbNum); GenTree* cond = conds[i].ToGenTree(comp, newBlk, /* invert */ true); GenTree* jmpTrueTree = comp->gtNewOperNode(GT_JTRUE, TYP_VOID, cond); Statement* stmt = comp->fgNewStmtFromTree(jmpTrueTree); comp->fgInsertStmtAtEnd(newBlk, stmt); // Remorph. JITDUMP("Loop cloning condition tree before morphing:\n"); DBEXEC(comp->verbose, comp->gtDispTree(jmpTrueTree)); JITDUMP("\n"); comp->fgMorphBlockStmt(newBlk, stmt DEBUGARG("Loop cloning condition")); insertAfter = newBlk; } return newBlk; } else { BasicBlock* newBlk = comp->fgNewBBafter(BBJ_COND, insertAfter, /*extendRegion*/ true); newBlk->inheritWeight(insertAfter); newBlk->bbNatLoopNum = insertAfter->bbNatLoopNum; newBlk->bbJumpDest = slowHead; JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", newBlk->bbNum, newBlk->bbJumpDest->bbNum); comp->fgAddRefPred(newBlk->bbJumpDest, newBlk); JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", insertAfter->bbNum, newBlk->bbNum); comp->fgAddRefPred(newBlk, insertAfter); JITDUMP("Adding conditions to " FMT_BB "\n", newBlk->bbNum); // Get the first condition. GenTree* cond = conds[0].ToGenTree(comp, newBlk, /* invert */ false); for (unsigned i = 1; i < conds.Size(); ++i) { // Append all conditions using AND operator. cond = comp->gtNewOperNode(GT_AND, TYP_INT, cond, conds[i].ToGenTree(comp, newBlk, /* invert */ false)); } // Add "cond == 0" node cond = comp->gtNewOperNode(GT_EQ, TYP_INT, cond, comp->gtNewIconNode(0)); // Add jmpTrue "cond == 0" GenTree* jmpTrueTree = comp->gtNewOperNode(GT_JTRUE, TYP_VOID, cond); Statement* stmt = comp->fgNewStmtFromTree(jmpTrueTree); comp->fgInsertStmtAtEnd(newBlk, stmt); // Remorph. JITDUMP("Loop cloning condition tree before morphing:\n"); DBEXEC(comp->verbose, comp->gtDispTree(jmpTrueTree)); JITDUMP("\n"); comp->fgMorphBlockStmt(newBlk, stmt DEBUGARG("Loop cloning condition")); return newBlk; } } //-------------------------------------------------------------------------------------------------- // Lcl - the current node's local variable. // // Arguments: // None. // // Operation: // If level is 0, then just return the array base. Else return the index variable on dim 'level' // // Return Values: // The local variable in the node's level. // unsigned LC_Deref::Lcl() { unsigned lvl = level; if (lvl == 0) { return array.arrIndex->arrLcl; } lvl--; return array.arrIndex->indLcls[lvl]; } //-------------------------------------------------------------------------------------------------- // HasChildren - Check if there are children to 'this' node. // // Arguments: // None. 
// // Return Values: // Return true if children are present. // bool LC_Deref::HasChildren() { return children != nullptr && children->Size() > 0; } //-------------------------------------------------------------------------------------------------- // DeriveLevelConditions - Generate conditions for each level of the tree. // // Arguments: // conds An array of conditions for each level i.e., (level x conditions). This array will // contain the conditions for the tree at the end of the method. // // Operation: // level0 yields only (a != null) condition. All other levels yield two conditions: // (level < a[...].length && a[...][level] != null) // // Return Values: // None // void LC_Deref::DeriveLevelConditions(JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* conds) { if (level == 0) { // For level 0, just push (a != null). (*conds)[level]->Push( LC_Condition(GT_NE, LC_Expr(LC_Ident(Lcl(), LC_Ident::Var)), LC_Expr(LC_Ident(LC_Ident::Null)))); } else { // Adjust for level0 having just 1 condition and push condition (i < a.len). LC_Array arrLen = array; arrLen.oper = LC_Array::ArrLen; arrLen.dim = level - 1; (*conds)[level * 2 - 1]->Push( LC_Condition(GT_LT, LC_Expr(LC_Ident(Lcl(), LC_Ident::Var)), LC_Expr(LC_Ident(arrLen)))); // Push condition (a[i] != null) LC_Array arrTmp = array; arrTmp.dim = level; (*conds)[level * 2]->Push(LC_Condition(GT_NE, LC_Expr(LC_Ident(arrTmp)), LC_Expr(LC_Ident(LC_Ident::Null)))); } // Invoke on the children recursively. if (HasChildren()) { for (unsigned i = 0; i < children->Size(); ++i) { (*children)[i]->DeriveLevelConditions(conds); } } } //-------------------------------------------------------------------------------------------------- // EnsureChildren - Create an array of child nodes if nullptr. // // Arguments: // alloc CompAllocator instance // // Return Values: // None // void LC_Deref::EnsureChildren(CompAllocator alloc) { if (children == nullptr) { children = new (alloc) JitExpandArrayStack<LC_Deref*>(alloc); } } //-------------------------------------------------------------------------------------------------- // Find - Find the node representing the local variable in child nodes of the 'this' node. // // Arguments: // lcl the local to find in the children array // // Return Values: // The child node if found or nullptr. // LC_Deref* LC_Deref::Find(unsigned lcl) { return Find(children, lcl); } //-------------------------------------------------------------------------------------------------- // Find - Find the node representing the local variable in a list of nodes. // // Arguments: // lcl the local to find. // children the list of nodes to find the node representing the lcl. // // Return Values: // The node if found or nullptr. // // static LC_Deref* LC_Deref::Find(JitExpandArrayStack<LC_Deref*>* children, unsigned lcl) { if (children == nullptr) { return nullptr; } for (unsigned i = 0; i < children->Size(); ++i) { if ((*children)[i]->Lcl() == lcl) { return (*children)[i]; } } return nullptr; } //------------------------------------------------------------------------ // optDeriveLoopCloningConditions: Derive loop cloning conditions. // // Arguments: // loopNum - the current loop index for which conditions are derived. // context - data structure where all loop cloning info is kept. // // Return Value: // "false" if conditions cannot be obtained. "true" otherwise. // The cloning conditions are updated in the "conditions"[loopNum] field // of the "context" parameter. 
// // Operation: // Inspect the loop cloning optimization candidates and populate the conditions necessary // for each optimization candidate. Checks if the loop stride is "> 0" if the loop // condition is `<` or `<=`. If the initializer is "var" init then adds condition // "var >= 0", and if the loop is var limit then, "var >= 0" and "var <= a.len" // are added to "context". These conditions are checked in the pre-header block // and the cloning choice is made. // // Assumption: // Callers should assume AND operation is used i.e., if all conditions are // true, then take the fast path. // bool Compiler::optDeriveLoopCloningConditions(unsigned loopNum, LoopCloneContext* context) { JITDUMP("------------------------------------------------------------\n"); JITDUMP("Deriving cloning conditions for " FMT_LP "\n", loopNum); LoopDsc* loop = &optLoopTable[loopNum]; JitExpandArrayStack<LcOptInfo*>* optInfos = context->GetLoopOptInfo(loopNum); if (GenTree::StaticOperIs(loop->lpTestOper(), GT_LT, GT_LE)) { // Stride conditions if (loop->lpIterConst() <= 0) { JITDUMP("> Stride %d is invalid\n", loop->lpIterConst()); return false; } // Init conditions if (loop->lpFlags & LPFLG_CONST_INIT) { // Only allowing non-negative const init at this time. // This is because the variable initialized with this constant will be used as an array index, // and array indices must be non-negative. if (loop->lpConstInit < 0) { JITDUMP("> Init %d is invalid\n", loop->lpConstInit); return false; } } else { // iterVar >= 0 const unsigned initLcl = loop->lpIterVar(); if (!genActualTypeIsInt(lvaGetDesc(initLcl))) { JITDUMP("> Init var V%02u not compatible with TYP_INT\n", initLcl); return false; } LC_Condition geZero(GT_GE, LC_Expr(LC_Ident(initLcl, LC_Ident::Var)), LC_Expr(LC_Ident(0, LC_Ident::Const))); context->EnsureConditions(loopNum)->Push(geZero); } // Limit Conditions LC_Ident ident; if (loop->lpFlags & LPFLG_CONST_LIMIT) { int limit = loop->lpConstLimit(); if (limit < 0) { JITDUMP("> limit %d is invalid\n", limit); return false; } ident = LC_Ident(static_cast<unsigned>(limit), LC_Ident::Const); } else if (loop->lpFlags & LPFLG_VAR_LIMIT) { const unsigned limitLcl = loop->lpVarLimit(); if (!genActualTypeIsInt(lvaGetDesc(limitLcl))) { JITDUMP("> Limit var V%02u not compatible with TYP_INT\n", limitLcl); return false; } ident = LC_Ident(limitLcl, LC_Ident::Var); LC_Condition geZero(GT_GE, LC_Expr(ident), LC_Expr(LC_Ident(0, LC_Ident::Const))); context->EnsureConditions(loopNum)->Push(geZero); } else if (loop->lpFlags & LPFLG_ARRLEN_LIMIT) { ArrIndex* index = new (getAllocator(CMK_LoopClone)) ArrIndex(getAllocator(CMK_LoopClone)); if (!loop->lpArrLenLimit(this, index)) { JITDUMP("> ArrLen not matching\n"); return false; } ident = LC_Ident(LC_Array(LC_Array::Jagged, index, LC_Array::ArrLen)); // Ensure that this array must be dereference-able, before executing the actual condition. 
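            // (Recording the array in EnsureDerefs below is what later causes optComputeDerefConditions
            // to emit the block-level "(a != null)" / "(i < a.len)" style checks for it.)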
LC_Array array(LC_Array::Jagged, index, LC_Array::None); context->EnsureDerefs(loopNum)->Push(array); } else { JITDUMP("> Undetected limit\n"); return false; } // GT_LT loop test: limit <= arrLen // GT_LE loop test: limit < arrLen genTreeOps opLimitCondition; switch (loop->lpTestOper()) { case GT_LT: opLimitCondition = GT_LE; break; case GT_LE: opLimitCondition = GT_LT; break; default: unreached(); } for (unsigned i = 0; i < optInfos->Size(); ++i) { LcOptInfo* optInfo = optInfos->Get(i); switch (optInfo->GetOptType()) { case LcOptInfo::LcJaggedArray: { LcJaggedArrayOptInfo* arrIndexInfo = optInfo->AsLcJaggedArrayOptInfo(); LC_Array arrLen(LC_Array::Jagged, &arrIndexInfo->arrIndex, arrIndexInfo->dim, LC_Array::ArrLen); LC_Ident arrLenIdent = LC_Ident(arrLen); LC_Condition cond(opLimitCondition, LC_Expr(ident), LC_Expr(arrLenIdent)); context->EnsureConditions(loopNum)->Push(cond); // Ensure that this array must be dereference-able, before executing the actual condition. LC_Array array(LC_Array::Jagged, &arrIndexInfo->arrIndex, arrIndexInfo->dim, LC_Array::None); context->EnsureDerefs(loopNum)->Push(array); } break; case LcOptInfo::LcMdArray: { LcMdArrayOptInfo* mdArrInfo = optInfo->AsLcMdArrayOptInfo(); LC_Array arrLen(LC_Array(LC_Array::MdArray, mdArrInfo->GetArrIndexForDim(getAllocator(CMK_LoopClone)), mdArrInfo->dim, LC_Array::None)); LC_Ident arrLenIdent = LC_Ident(arrLen); LC_Condition cond(opLimitCondition, LC_Expr(ident), LC_Expr(arrLenIdent)); context->EnsureConditions(loopNum)->Push(cond); // TODO: ensure array is dereference-able? } break; default: JITDUMP("Unknown opt\n"); return false; } } JITDUMP("Conditions: "); DBEXEC(verbose, context->PrintConditions(loopNum)); JITDUMP("\n"); return true; } return false; } //------------------------------------------------------------------------------------ // optComputeDerefConditions: Derive loop cloning conditions for dereferencing arrays. // // Arguments: // loopNum - the current loop index for which conditions are derived. // context - data structure where all loop cloning info is kept. // // Return Value: // "false" if conditions cannot be obtained. "true" otherwise. // The deref conditions are updated in the "derefConditions"[loopNum] field // of the "context" parameter. // // Definition of Deref Conditions: // To be able to check for the loop cloning condition that (limitVar <= a.len) // we should first be able to dereference "a". i.e., "a" is non-null. // // Example: // // for (i in 0..n) // for (j in 0..n) // for (k in 0..n) // Inner most loop is being cloned. Cloning needs to check if // // (n <= a[i][j].len) and other safer conditions to take the fast path // a[i][j][k] = 0 // // Now, we want to deref a[i][j] to invoke length operator on it to perform the cloning fast path check. // This involves deref of (a), (a[i]), (a[i][j]), therefore, the following should first // be true to do the deref. // // (a != null) && (i < a.len) && (a[i] != null) && (j < a[i].len) && (a[i][j] != null) --> condition set (1) // // Note the short circuiting AND. Implication: these conditions should be performed in separate // blocks each of which will branch to slow path if the condition evaluates to false. 
// // Now, imagine a situation where, in the inner loop above, in addition to "a[i][j][k] = 0" we // also have: // a[x][y][k] = 20 // where x and y are parameters, then our conditions will have to include: // (x < a.len) && // (y < a[x].len) // in addition to the above conditions (1) to get rid of bounds check on index 'k' // // But these conditions can be checked together with conditions // (i < a.len) without a need for a separate block. In summary, the conditions will be: // // (a != null) && // ((i < a.len) & (x < a.len)) && <-- Note the bitwise AND here. // (a[i] != null & a[x] != null) && <-- Note the bitwise AND here. // (j < a[i].len & y < a[x].len) && <-- Note the bitwise AND here. // (a[i][j] != null & a[x][y] != null) <-- Note the bitwise AND here. // // This naturally yields a tree style pattern, where the nodes of the tree are // the array and indices respectively. // // Example: // a => { // i => { // j => { // k => {} // } // }, // x => { // y => { // k => {} // } // } // } // // Notice that the variables in the same levels can have their conditions combined in the // same block with a bitwise AND. Whereas, the conditions in consecutive levels will be // combined with a short-circuiting AND (i.e., different basic blocks). // // Operation: // Construct a tree of array indices and the array which will generate the optimal // conditions for loop cloning. // // a[i][j][k], b[i] and a[i][y][k] are the occurrences in the loop. Then, the tree should be: // // a => { // i => { // j => { // k => {} // }, // y => { // k => {} // }, // } // }, // b => { // i => {} // } // // In this method, we will construct such a tree by descending depth first into the array // index operation and forming a tree structure as we encounter the array or the index variables. // // This tree structure will then be used to generate conditions like below: // (a != null) & (b != null) && // from the first level of the tree. // // (i < a.len) & (i < b.len) && // from the second level of the tree. Levels can be combined. // (a[i] != null) & (b[i] != null) && // from the second level of the tree. // // (j < a[i].len) & (y < a[i].len) && // from the third level. // (a[i][j] != null) & (a[i][y] != null) && // from the third level. // // and so on. // bool Compiler::optComputeDerefConditions(unsigned loopNum, LoopCloneContext* context) { JitExpandArrayStack<LC_Deref*> nodes(getAllocator(CMK_LoopClone)); int maxRank = -1; // Get the dereference-able arrays. JitExpandArrayStack<LC_Array>* deref = context->EnsureDerefs(loopNum); // For each array in the dereference list, construct a tree, // where the nodes are array and index variables and an edge 'u-v' // exists if a node 'v' indexes node 'u' directly as in u[v] or an edge // 'u-v-w' transitively if u[v][w] occurs. for (unsigned i = 0; i < deref->Size(); ++i) { LC_Array& array = (*deref)[i]; // First populate the array base variable. LC_Deref* node = LC_Deref::Find(&nodes, array.arrIndex->arrLcl); if (node == nullptr) { node = new (getAllocator(CMK_LoopClone)) LC_Deref(array, 0 /*level*/); nodes.Push(node); } // For each dimension (level) for the array, populate the tree with the variable // from that dimension. unsigned rank = (unsigned)array.GetDimRank(); for (unsigned i = 0; i < rank; ++i) { node->EnsureChildren(getAllocator(CMK_LoopClone)); LC_Deref* tmp = node->Find(array.arrIndex->indLcls[i]); if (tmp == nullptr) { tmp = new (getAllocator(CMK_LoopClone)) LC_Deref(array, node->level + 1); node->children->Push(tmp); } // Descend one level down. 
node = tmp; } // Keep the maxRank of all array dereferences. maxRank = max((int)rank, maxRank); } #ifdef DEBUG if (verbose) { printf("Deref condition tree:\n"); for (unsigned i = 0; i < nodes.Size(); ++i) { nodes[i]->Print(); printf("\n"); } } #endif if (maxRank == -1) { JITDUMP("> maxRank undefined\n"); return false; } // First level will always yield the null-check, since it is made of the array base variables. // All other levels (dimensions) will yield two conditions ex: (i < a.length && a[i] != null) // So add 1 after rank * 2. unsigned condBlocks = (unsigned)maxRank * 2 + 1; // Heuristic to not create too many blocks. Defining as 3 allows, effectively, loop cloning on // doubly-nested loops. // REVIEW: make this based on a COMPlus configuration, at least for debug? const unsigned maxAllowedCondBlocks = 3; if (condBlocks > maxAllowedCondBlocks) { JITDUMP("> Too many condition blocks (%u > %u)\n", condBlocks, maxAllowedCondBlocks); return false; } // Derive conditions into an 'array of level x array of conditions' i.e., levelCond[levels][conds] JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* levelCond = context->EnsureBlockConditions(loopNum, condBlocks); for (unsigned i = 0; i < nodes.Size(); ++i) { nodes[i]->DeriveLevelConditions(levelCond); } DBEXEC(verbose, context->PrintBlockConditions(loopNum)); return true; } #ifdef DEBUG //---------------------------------------------------------------------------- // optDebugLogLoopCloning: Insert a call to jithelper that prints a message. // // Arguments: // block - the block in which the helper call needs to be inserted. // insertBefore - the stmt before which the helper call will be inserted. // void Compiler::optDebugLogLoopCloning(BasicBlock* block, Statement* insertBefore) { if (JitConfig.JitDebugLogLoopCloning() == 0) { return; } GenTree* logCall = gtNewHelperCallNode(CORINFO_HELP_DEBUG_LOG_LOOP_CLONING, TYP_VOID); Statement* stmt = fgNewStmtFromTree(logCall); fgInsertStmtBefore(block, insertBefore, stmt); fgMorphBlockStmt(block, stmt DEBUGARG("Debug log loop cloning")); } #endif // DEBUG //------------------------------------------------------------------------ // optPerformStaticOptimizations: Perform the optimizations for the optimization // candidates gathered during the cloning phase. // // Arguments: // loopNum - the current loop index for which the optimizations are performed. // context - data structure where all loop cloning info is kept. // dynamicPath - If true, the optimization is performed in the fast path among the // cloned loops. If false, it means this is the only path (i.e., // there is no slow path.) // // Operation: // Perform the optimizations on the fast path i.e., the path in which the // optimization candidates were collected at the time of identifying them. // The candidates store all the information necessary (the tree/stmt/block // they are from) to perform the optimization. // // Assumption: // The unoptimized path is either already cloned when this method is called or // there is no unoptimized path (got eliminated statically.) So this method // performs the optimizations assuming that the path in which the candidates // were collected is the fast path in which the optimizations will be performed. 
// void Compiler::optPerformStaticOptimizations(unsigned loopNum, LoopCloneContext* context DEBUGARG(bool dynamicPath)) { JitExpandArrayStack<LcOptInfo*>* optInfos = context->GetLoopOptInfo(loopNum); assert(optInfos != nullptr); for (unsigned i = 0; i < optInfos->Size(); ++i) { LcOptInfo* optInfo = optInfos->Get(i); switch (optInfo->GetOptType()) { case LcOptInfo::LcJaggedArray: { LcJaggedArrayOptInfo* arrIndexInfo = optInfo->AsLcJaggedArrayOptInfo(); compCurBB = arrIndexInfo->arrIndex.useBlock; // Remove all bounds checks for this array up to (and including) `arrIndexInfo->dim`. So, if that is 1, // Remove rank 0 and 1 bounds checks. for (unsigned dim = 0; dim <= arrIndexInfo->dim; dim++) { GenTree* bndsChkNode = arrIndexInfo->arrIndex.bndsChks[dim]; #ifdef DEBUG if (verbose) { printf("Remove bounds check "); printTreeID(bndsChkNode->gtGetOp1()); printf(" for " FMT_STMT ", dim% d, ", arrIndexInfo->stmt->GetID(), dim); arrIndexInfo->arrIndex.Print(); printf(", bounds check nodes: "); arrIndexInfo->arrIndex.PrintBoundsCheckNodes(); printf("\n"); } #endif // DEBUG if (bndsChkNode->gtGetOp1()->OperIs(GT_BOUNDS_CHECK)) { // This COMMA node will only represent a bounds check if we've haven't already removed this // bounds check in some other nesting cloned loop. For example, consider: // for (i = 0; i < x; i++) // for (j = 0; j < y; j++) // a[i][j] = i + j; // If the outer loop is cloned first, it will remove the a[i] bounds check from the optimized // path. Later, when the inner loop is cloned, we want to remove the a[i][j] bounds check. If // we clone the inner loop, we know that the a[i] bounds check isn't required because we'll add // it to the loop cloning conditions. On the other hand, we can clone a loop where we get rid of // the nested bounds check but nobody has gotten rid of the outer bounds check. As before, we // know the outer bounds check is not needed because it's been added to the cloning conditions, // so we can get rid of the bounds check here. // optRemoveCommaBasedRangeCheck(bndsChkNode, arrIndexInfo->stmt); } else { JITDUMP(" Bounds check already removed\n"); // If the bounds check node isn't there, it better have been converted to a GT_NOP. assert(bndsChkNode->gtGetOp1()->OperIs(GT_NOP)); } } DBEXEC(dynamicPath, optDebugLogLoopCloning(arrIndexInfo->arrIndex.useBlock, arrIndexInfo->stmt)); } break; case LcOptInfo::LcMdArray: // TODO-CQ: CLONE: Implement. break; default: break; } } } //---------------------------------------------------------------------------- // optIsLoopClonable: Determine whether this loop can be cloned. // // Arguments: // loopInd loop index which needs to be checked if it can be cloned. // // Return Value: // Returns true if the loop can be cloned. If it returns false, // it prints a message to the JIT dump describing why the loop can't be cloned. // // Notes: if `true` is returned, then `fgReturnCount` is increased by the number of // return blocks in the loop that will be cloned. (REVIEW: this 'predicate' function // doesn't seem like the right place to do this change.) // bool Compiler::optIsLoopClonable(unsigned loopInd) { const LoopDsc& loop = optLoopTable[loopInd]; if (!(loop.lpFlags & LPFLG_ITER)) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". No LPFLG_ITER flag.\n", loopInd); return false; } if (loop.lpFlags & LPFLG_REMOVED) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". It is marked LPFLG_REMOVED.\n", loopInd); return false; } // Make sure the loop doesn't have any embedded exception handling. 
// Walk the loop blocks from lexically first to lexically last (all blocks in this region must be // part of the loop), looking for a `try` begin block. Note that a loop must entirely contain any // EH region, or be itself entirely contained within an EH region. Thus, looking just for a `try` // begin is sufficient; there is no need to look for other EH constructs, such as a `catch` begin. // // TODO: this limitation could be removed if we do the work to insert new EH regions in the exception table, // for the cloned loop (and its embedded EH regions). // // Also, count the number of return blocks within the loop for future use. unsigned loopRetCount = 0; for (BasicBlock* const blk : loop.LoopBlocks()) { if (blk->bbJumpKind == BBJ_RETURN) { loopRetCount++; } if (bbIsTryBeg(blk)) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". It has a `try` begin.\n", loopInd); return false; } } // Is the entry block a handler or filter start? If so, then if we cloned, we could create a jump // into the middle of a handler (to go to the cloned copy.) Reject. if (bbIsHandlerBeg(loop.lpEntry)) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". Entry block is a handler start.\n", loopInd); return false; } // If the head and entry are in different EH regions, reject. if (!BasicBlock::sameEHRegion(loop.lpHead, loop.lpEntry)) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". Head and entry blocks are in different EH regions.\n", loopInd); return false; } // Is the first block after the last block of the loop a handler or filter start? // Usually, we create a dummy block after the original loop, to skip over the loop clone // and go to where the original loop did. That raises problems when we don't actually go to // that block; this is one of those cases. This could be fixed fairly easily; for example, // we could add a dummy nop block after the (cloned) loop bottom, in the same handler scope as the // loop. This is just a corner to cut to get this working faster. BasicBlock* bbAfterLoop = loop.lpBottom->bbNext; if (bbAfterLoop != nullptr && bbIsHandlerBeg(bbAfterLoop)) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". Next block after bottom is a handler start.\n", loopInd); return false; } // We've previously made a decision whether to have separate return epilogs, or branch to one. // There's a GCInfo limitation in the x86 case, so that there can be no more than SET_EPILOGCNT_MAX separate // epilogs. Other architectures have a limit of 4 here for "historical reasons", but this should be revisited // (or return blocks should not be considered part of the loop, rendering this issue moot). unsigned epilogLimit = 4; #ifdef JIT32_GCENCODER epilogLimit = SET_EPILOGCNT_MAX; #endif // JIT32_GCENCODER if (fgReturnCount + loopRetCount > epilogLimit) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". It has %d returns;" " if added to previously existing %d returns, it would exceed the limit of %d.\n", loopInd, loopRetCount, fgReturnCount, epilogLimit); return false; } unsigned ivLclNum = loop.lpIterVar(); if (lvaVarAddrExposed(ivLclNum)) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". Rejected V%02u as iter var because it is address-exposed.\n", loopInd, ivLclNum); return false; } BasicBlock* top = loop.lpTop; BasicBlock* bottom = loop.lpBottom; if (bottom->bbJumpKind != BBJ_COND) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". Couldn't find termination test.\n", loopInd); return false; } if (bottom->bbJumpDest != top) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". 
Branch at loop 'bottom' not looping to 'top'.\n", loopInd); return false; } // TODO-CQ: CLONE: Mark increasing or decreasing loops. if ((loop.lpIterOper() != GT_ADD) || (loop.lpIterConst() != 1)) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". Loop iteration operator not matching.\n", loopInd); return false; } if ((loop.lpFlags & LPFLG_CONST_LIMIT) == 0 && (loop.lpFlags & LPFLG_VAR_LIMIT) == 0 && (loop.lpFlags & LPFLG_ARRLEN_LIMIT) == 0) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". Loop limit is neither constant, variable or array length.\n", loopInd); return false; } if (!((GenTree::StaticOperIs(loop.lpTestOper(), GT_LT, GT_LE) && (loop.lpIterOper() == GT_ADD)) || (GenTree::StaticOperIs(loop.lpTestOper(), GT_GT, GT_GE) && (loop.lpIterOper() == GT_SUB)))) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". Loop test (%s) doesn't agree with the direction (%s) of the loop.\n", loopInd, GenTree::OpName(loop.lpTestOper()), GenTree::OpName(loop.lpIterOper())); return false; } if (!loop.lpTestTree->OperIsCompare() || !(loop.lpTestTree->gtFlags & GTF_RELOP_ZTT)) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". Loop inversion NOT present, loop test [%06u] may not protect " "entry from head.\n", loopInd, loop.lpTestTree->gtTreeID); return false; } #ifdef DEBUG GenTree* op1 = loop.lpIterator(); assert((op1->gtOper == GT_LCL_VAR) && (op1->AsLclVarCommon()->GetLclNum() == ivLclNum)); #endif // Otherwise, we're going to add those return blocks. fgReturnCount += loopRetCount; return true; } //-------------------------------------------------------------------------------------------------- // optInsertLoopChoiceConditions: Insert the loop conditions for a loop after the loop head. // // Arguments: // context loop cloning context variable // loopNum the loop index // slowHead the slow path loop head, where the condition failures branch // insertAfter insert the conditions after this block // // Return Value: // The last condition block added. // // Operation: // Create the following structure. // // h (fall through) // !cond0 -?> slowHead // !cond1 -?> slowHead // ... // !condn -?> slowHead // h2/entry (fast) // ... // slowHead -?> e2 (slowHead) branch or fall-through to e2 // BasicBlock* Compiler::optInsertLoopChoiceConditions(LoopCloneContext* context, unsigned loopNum, BasicBlock* slowHead, BasicBlock* insertAfter) { JITDUMP("Inserting loop " FMT_LP " loop choice conditions\n", loopNum); assert(context->HasBlockConditions(loopNum)); assert(slowHead != nullptr); assert(insertAfter->bbJumpKind == BBJ_NONE); JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* levelCond = context->GetBlockConditions(loopNum); for (unsigned i = 0; i < levelCond->Size(); ++i) { JITDUMP("Adding loop " FMT_LP " level %u block conditions\n ", loopNum, i); DBEXEC(verbose, context->PrintBlockLevelConditions(i, (*levelCond)[i])); insertAfter = context->CondToStmtInBlock(this, *((*levelCond)[i]), slowHead, insertAfter); } // Finally insert cloning conditions after all deref conditions have been inserted. JITDUMP("Adding loop " FMT_LP " cloning conditions\n ", loopNum); DBEXEC(verbose, context->PrintConditions(loopNum)); JITDUMP("\n"); insertAfter = context->CondToStmtInBlock(this, *(context->GetConditions(loopNum)), slowHead, insertAfter); return insertAfter; } //------------------------------------------------------------------------ // OptEnsureUniqueHead: Ensure that loop "loopInd" has a unique head block. 
// If the existing entry has non-loop predecessors other than the head entry, // create a new, empty block that goes (only) to the entry, and redirects the // preds of the entry to this new block. Sets the weight of the newly created // block to "ambientWeight". // // NOTE: this is currently dead code, because it is only called by loop cloning, // and loop cloning only works with single-entry loops where the immediately // preceding head block is the only predecessor of the loop entry. // // Arguments: // loopInd - index of loop to process // ambientWeight - weight to give the new head, if created. // void Compiler::optEnsureUniqueHead(unsigned loopInd, weight_t ambientWeight) { LoopDsc& loop = optLoopTable[loopInd]; BasicBlock* h = loop.lpHead; BasicBlock* t = loop.lpTop; BasicBlock* e = loop.lpEntry; BasicBlock* b = loop.lpBottom; // If "h" dominates the entry block, then it is the unique header. if (fgDominate(h, e)) { return; } // Otherwise, create a new empty header block, make it the pred of the entry block, // and redirect the preds of the entry block to go to this. BasicBlock* beforeTop = t->bbPrev; assert(!beforeTop->bbFallsThrough() || (beforeTop->bbNext == e)); // Make sure that the new block is in the same region as the loop. // (We will only create loops that are entirely within a region.) BasicBlock* h2 = fgNewBBafter(BBJ_NONE, beforeTop, /*extendRegion*/ true); assert(beforeTop->bbNext == h2); // This is in the containing loop. h2->bbNatLoopNum = loop.lpParent; h2->bbWeight = h2->isRunRarely() ? BB_ZERO_WEIGHT : ambientWeight; if (h2->bbNext != e) { h2->bbJumpKind = BBJ_ALWAYS; h2->bbJumpDest = e; } BlockSetOps::Assign(this, h2->bbReach, e->bbReach); fgAddRefPred(e, h2); // Redirect paths from preds of "e" to go to "h2" instead of "e". BlockToBlockMap* blockMap = new (getAllocator(CMK_LoopClone)) BlockToBlockMap(getAllocator(CMK_LoopClone)); blockMap->Set(e, h2); for (BasicBlock* const predBlock : e->PredBlocks()) { // Skip if predBlock is in the loop. if (t->bbNum <= predBlock->bbNum && predBlock->bbNum <= b->bbNum) { continue; } optRedirectBlock(predBlock, blockMap); fgAddRefPred(h2, predBlock); fgRemoveRefPred(e, predBlock); } optUpdateLoopHead(loopInd, h, h2); } //------------------------------------------------------------------------ // optCloneLoop: Perform the mechanical cloning of the specified loop // // Arguments: // loopInd - loop index of loop to clone // context - data structure where all loop cloning info is kept. // void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) { assert(loopInd < optLoopCount); LoopDsc& loop = optLoopTable[loopInd]; JITDUMP("\nCloning loop " FMT_LP ": [head: " FMT_BB ", top: " FMT_BB ", entry: " FMT_BB ", bottom: " FMT_BB ", child: " FMT_LP "].\n", loopInd, loop.lpHead->bbNum, loop.lpTop->bbNum, loop.lpEntry->bbNum, loop.lpBottom->bbNum, loop.lpChild); // Determine the depth of the loop, so we can properly weight blocks added (outside the cloned loop blocks). unsigned depth = optLoopDepth(loopInd); weight_t ambientWeight = 1; for (unsigned j = 0; j < depth; j++) { weight_t lastWeight = ambientWeight; ambientWeight *= BB_LOOP_WEIGHT_SCALE; assert(ambientWeight > lastWeight); } // If we're in a non-natural loop, the ambient weight might be higher than we computed above. // Be safe by taking the max with the head block's weight. ambientWeight = max(ambientWeight, loop.lpHead->bbWeight); // We assume that the fast path will run 99% of the time, and thus should get 99% of the block weights. 
// The slow path will, correspondingly, get only 1% of the block weights. It could be argued that we should // mark the slow path as "run rarely", since it really shouldn't execute (given the currently optimized loop // conditions) except under exceptional circumstances. const weight_t fastPathWeightScaleFactor = 0.99; const weight_t slowPathWeightScaleFactor = 1.0 - fastPathWeightScaleFactor; // This is the containing loop, if any -- to label any blocks we create that are outside // the loop being cloned. unsigned char ambientLoop = loop.lpParent; // First, make sure that the loop has a unique header block, creating an empty one if necessary. optEnsureUniqueHead(loopInd, ambientWeight); // We're going to transform this loop: // // H --> E (or, H conditionally branches around the loop and has fall-through to T == E) // T // E // B ?-> T // X // // to this pair of loops: // // H ?-> H3 (all loop failure conditions branch to new slow path loop head) // H2--> E (Optional; if T == E, let H fall through to T/E) // T // E // B ?-> T // X2--> X // H3 --> E2 (aka slowHead. Or, H3 falls through to T2 == E2) // T2 // E2 // B2 ?-> T2 // X BasicBlock* h = loop.lpHead; if (!h->KindIs(BBJ_NONE, BBJ_ALWAYS)) { // Make a new block to be the unique entry to the loop. JITDUMP("Create new unique single-successor entry to loop\n"); assert((h->bbJumpKind == BBJ_COND) && (h->bbNext == loop.lpEntry)); BasicBlock* newH = fgNewBBafter(BBJ_NONE, h, /*extendRegion*/ true); JITDUMP("Adding " FMT_BB " after " FMT_BB "\n", newH->bbNum, h->bbNum); newH->bbWeight = newH->isRunRarely() ? BB_ZERO_WEIGHT : ambientWeight; BlockSetOps::Assign(this, newH->bbReach, h->bbReach); // This is in the scope of a surrounding loop, if one exists -- the parent of the loop we're cloning. newH->bbNatLoopNum = ambientLoop; optUpdateLoopHead(loopInd, h, newH); fgAddRefPred(newH, h); // Add h->newH pred edge JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", h->bbNum, newH->bbNum); fgReplacePred(newH->bbNext, h, newH); // Replace pred in COND fall-through block. JITDUMP("Replace " FMT_BB " -> " FMT_BB " with " FMT_BB " -> " FMT_BB "\n", h->bbNum, newH->bbNext->bbNum, newH->bbNum, newH->bbNext->bbNum); h = newH; } assert(h == loop.lpHead); // Make X2 after B, if necessary. (Not necessary if B is a BBJ_ALWAYS.) // "newPred" will be the predecessor of the blocks of the cloned loop. BasicBlock* b = loop.lpBottom; BasicBlock* newPred = b; if (b->bbJumpKind != BBJ_ALWAYS) { assert(b->bbJumpKind == BBJ_COND); BasicBlock* x = b->bbNext; if (x != nullptr) { JITDUMP("Create branch around cloned loop\n"); BasicBlock* x2 = fgNewBBafter(BBJ_ALWAYS, b, /*extendRegion*/ true); JITDUMP("Adding " FMT_BB " after " FMT_BB "\n", x2->bbNum, b->bbNum); x2->bbWeight = x2->isRunRarely() ? BB_ZERO_WEIGHT : ambientWeight; // This is in the scope of a surrounding loop, if one exists -- the parent of the loop we're cloning. x2->bbNatLoopNum = ambientLoop; x2->bbJumpDest = x; BlockSetOps::Assign(this, x2->bbReach, h->bbReach); fgAddRefPred(x2, b); // Add b->x2 pred edge JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", b->bbNum, x2->bbNum); fgReplacePred(x, b, x2); // The pred of x is now x2, not the fall-through of COND b. JITDUMP("Replace " FMT_BB " -> " FMT_BB " with " FMT_BB " -> " FMT_BB "\n", b->bbNum, x->bbNum, x2->bbNum, x->bbNum); newPred = x2; } } // We're going to create a new loop head for the slow loop immediately before the slow loop itself. All failed // conditions will branch to the slow head. 
The slow head will either fall through to the entry, or unconditionally // branch to the slow path entry. This puts the slow loop in the canonical loop form. BasicBlock* slowHeadPrev = newPred; // Now we'll make "h2", after "h" to go to "e" -- unless the loop is a do-while, // so that "h" already falls through to "e" (e == t). // It might look like this code is unreachable, since "h" must be a BBJ_ALWAYS, but // later we will change "h" to a BBJ_COND along with a set of loop conditions. // TODO: it still might be unreachable, since cloning currently is restricted to "do-while" loop forms. BasicBlock* h2 = nullptr; if (h->bbNext != loop.lpEntry) { assert(h->bbJumpKind == BBJ_ALWAYS); JITDUMP("Create branch to entry of optimized loop\n"); h2 = fgNewBBafter(BBJ_ALWAYS, h, /*extendRegion*/ true); JITDUMP("Adding " FMT_BB " after " FMT_BB "\n", h2->bbNum, h->bbNum); h2->bbWeight = h2->isRunRarely() ? BB_ZERO_WEIGHT : ambientWeight; // This is in the scope of a surrounding loop, if one exists -- the parent of the loop we're cloning. h2->bbNatLoopNum = ambientLoop; h2->bbJumpDest = loop.lpEntry; fgReplacePred(loop.lpEntry, h, h2); JITDUMP("Replace " FMT_BB " -> " FMT_BB " with " FMT_BB " -> " FMT_BB "\n", h->bbNum, loop.lpEntry->bbNum, h2->bbNum, loop.lpEntry->bbNum); optUpdateLoopHead(loopInd, h, h2); // NOTE: 'h' is no longer the loop head; 'h2' is! } // Now we'll clone the blocks of the loop body. These cloned blocks will be the slow path. BasicBlock* newFirst = nullptr; BlockToBlockMap* blockMap = new (getAllocator(CMK_LoopClone)) BlockToBlockMap(getAllocator(CMK_LoopClone)); for (BasicBlock* const blk : loop.LoopBlocks()) { BasicBlock* newBlk = fgNewBBafter(blk->bbJumpKind, newPred, /*extendRegion*/ true); JITDUMP("Adding " FMT_BB " (copy of " FMT_BB ") after " FMT_BB "\n", newBlk->bbNum, blk->bbNum, newPred->bbNum); // Call CloneBlockState to make a copy of the block's statements (and attributes), and assert that it // has a return value indicating success, because optCanOptimizeByLoopCloningVisitor has already // checked them to guarantee they are clonable. bool cloneOk = BasicBlock::CloneBlockState(this, newBlk, blk); noway_assert(cloneOk); // We're going to create the preds below, which will set the bbRefs properly, // so clear out the cloned bbRefs field. newBlk->bbRefs = 0; newBlk->scaleBBWeight(slowPathWeightScaleFactor); blk->scaleBBWeight(fastPathWeightScaleFactor); // TODO: scale the pred edges of `blk`? #if FEATURE_LOOP_ALIGN // If the original loop is aligned, do not align the cloned loop because cloned loop will be executed in // rare scenario. Additionally, having to align cloned loop will force us to disable some VEX prefix encoding // and adding compensation for over-estimated instructions. if (blk->isLoopAlign()) { newBlk->bbFlags &= ~BBF_LOOP_ALIGN; JITDUMP("Removing LOOP_ALIGN flag from cloned loop in " FMT_BB "\n", newBlk->bbNum); } #endif // TODO-Cleanup: The above clones the bbNatLoopNum, which is incorrect. Eventually, we should probably insert // the cloned loop in the loop table. For now, however, we'll just make these blocks be part of the surrounding // loop, if one exists -- the parent of the loop we're cloning. newBlk->bbNatLoopNum = loop.lpParent; if (newFirst == nullptr) { newFirst = newBlk; } newPred = newBlk; blockMap->Set(blk, newBlk); } // Perform the static optimizations on the fast path. 
optPerformStaticOptimizations(loopInd, context DEBUGARG(true)); // Now go through the new blocks, remapping their jump targets within the loop // and updating the preds lists. for (BasicBlock* const blk : loop.LoopBlocks()) { BasicBlock* newblk = nullptr; bool b = blockMap->Lookup(blk, &newblk); assert(b && newblk != nullptr); assert(blk->bbJumpKind == newblk->bbJumpKind); // First copy the jump destination(s) from "blk". optCopyBlkDest(blk, newblk); // Now redirect the new block according to "blockMap". optRedirectBlock(newblk, blockMap); // Add predecessor edges for the new successors, as well as the fall-through paths. switch (newblk->bbJumpKind) { case BBJ_NONE: fgAddRefPred(newblk->bbNext, newblk); break; case BBJ_ALWAYS: case BBJ_CALLFINALLY: fgAddRefPred(newblk->bbJumpDest, newblk); break; case BBJ_COND: fgAddRefPred(newblk->bbNext, newblk); fgAddRefPred(newblk->bbJumpDest, newblk); break; case BBJ_SWITCH: for (BasicBlock* const switchDest : newblk->SwitchTargets()) { fgAddRefPred(switchDest, newblk); } break; default: break; } } #ifdef DEBUG // Display the preds for the new blocks, after all the new blocks have been redirected. JITDUMP("Preds after loop copy:\n"); for (BasicBlock* const blk : loop.LoopBlocks()) { BasicBlock* newblk = nullptr; bool b = blockMap->Lookup(blk, &newblk); assert(b && newblk != nullptr); JITDUMP(FMT_BB ":", newblk->bbNum); for (BasicBlock* const predBlock : newblk->PredBlocks()) { JITDUMP(" " FMT_BB, predBlock->bbNum); } JITDUMP("\n"); } #endif // DEBUG // Insert the loop choice conditions. We will create the following structure: // // h (fall through) // !cond0 -?> slowHead // !cond1 -?> slowHead // ... // !condn -?> slowHead // h2/entry (fast) // ... // slowHead -?> e2 (slowHead) branch or fall-through to e2 // // We should always have block conditions; at the minimum, the array should be deref-able. assert(context->HasBlockConditions(loopInd)); if (h->bbJumpKind == BBJ_NONE) { assert(h->bbNext == loop.lpEntry); fgRemoveRefPred(h->bbNext, h); } else { assert(h->bbJumpKind == BBJ_ALWAYS); assert(h->bbJumpDest == loop.lpEntry); assert(h2 != nullptr); h->bbJumpKind = BBJ_NONE; h->bbJumpDest = nullptr; } // If any condition is false, go to slowHead (which branches or falls through to e2). BasicBlock* e2 = nullptr; bool foundIt = blockMap->Lookup(loop.lpEntry, &e2); assert(foundIt && e2 != nullptr); // Create a unique header for the slow path. JITDUMP("Create unique head block for slow path loop\n"); BasicBlock* slowHead = fgNewBBafter(BBJ_NONE, slowHeadPrev, /*extendRegion*/ true); JITDUMP("Adding " FMT_BB " after " FMT_BB "\n", slowHead->bbNum, slowHeadPrev->bbNum); slowHead->bbWeight = slowHeadPrev->isRunRarely() ? BB_ZERO_WEIGHT : ambientWeight; slowHead->scaleBBWeight(slowPathWeightScaleFactor); slowHead->bbNatLoopNum = ambientLoop; if (slowHead->bbNext != e2) { // We can't just fall through to the slow path entry, so make it an unconditional branch. slowHead->bbJumpKind = BBJ_ALWAYS; slowHead->bbJumpDest = e2; } fgAddRefPred(e2, slowHead); JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", slowHead->bbNum, e2->bbNum); BasicBlock* condLast = optInsertLoopChoiceConditions(context, loopInd, slowHead, h); // Add the fall-through path pred (either to T/E for fall-through from conditions to fast path, // or H2 if branch to E of fast path). assert(condLast->bbJumpKind == BBJ_COND); JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", condLast->bbNum, condLast->bbNext->bbNum); fgAddRefPred(condLast->bbNext, condLast); // If h2 is present it is already the head. 
Else, replace 'h' as the loop head by 'condLast'. if (h2 == nullptr) { optUpdateLoopHead(loopInd, loop.lpHead, condLast); } // Don't unroll loops that we've cloned -- the unroller expects any loop it should unroll to // initialize the loop counter immediately before entering the loop, but we've left a shared // initialization of the loop counter up above the test that determines which version of the // loop to take. loop.lpFlags |= LPFLG_DONT_UNROLL; } //------------------------------------------------------------------------- // optIsStackLocalInvariant: Is stack local invariant in loop. // // Arguments: // loopNum The loop in which the variable is tested for invariance. // lclNum The local that is tested for invariance in the loop. // // Return Value: // Returns true if the variable is loop invariant in loopNum. // bool Compiler::optIsStackLocalInvariant(unsigned loopNum, unsigned lclNum) { if (lvaVarAddrExposed(lclNum)) { return false; } if (optIsVarAssgLoop(loopNum, lclNum)) { return false; } return true; } //--------------------------------------------------------------------------------------------------------------- // optExtractArrIndex: Try to extract the array index from "tree". // // Arguments: // tree the tree to be checked if it is the array [] operation. // result the extracted GT_INDEX information is updated in result. // lhsNum for the root level (function is recursive) callers should pass BAD_VAR_NUM. // // Return Value: // Returns true if array index can be extracted, else, return false. See assumption about // what will be extracted. The "result" variable's rank parameter is advanced for every // dimension of [] encountered. // // Operation: // Given a "tree" extract the GT_INDEX node in "result" as ArrIndex. In morph // we have converted a GT_INDEX tree into a scaled index base offset expression. // However, we don't actually bother to parse the morphed tree. All we care about is // the bounds check node: it contains the array base and element index. The other side // of the COMMA node can vary between array of primitive type and array of struct. There's // no need to parse that, as the array bounds check contains the only thing we care about. // In particular, we are trying to find bounds checks to remove, so only looking at the bounds // check makes sense. We could verify that the bounds check is against the same array base/index // but it isn't necessary. // // Assumption: // The method extracts only if the array base and indices are GT_LCL_VAR. // // TODO-CQ: CLONE: After morph make sure this method extracts values before morph. // // Example tree to pattern match: // // * COMMA int // +--* BOUNDS_CHECK_Rng void // | +--* LCL_VAR int V02 loc1 // | \--* ARR_LENGTH int // | \--* LCL_VAR ref V00 arg0 // \--* IND int // \--* ADD byref // +--* LCL_VAR ref V00 arg0 // \--* ADD long // +--* LSH long // | +--* CAST long <- int // | | \--* LCL_VAR int V02 loc1 // | \--* CNS_INT long 2 // \--* CNS_INT long 16 Fseq[#FirstElem] // // Note that byte arrays don't require the LSH to scale the index, so look like this: // // * COMMA ubyte // +--* BOUNDS_CHECK_Rng void // | +--* LCL_VAR int V03 loc2 // | \--* ARR_LENGTH int // | \--* LCL_VAR ref V00 arg0 // \--* IND ubyte // \--* ADD byref // +--* LCL_VAR ref V00 arg0 // \--* ADD long // +--* CAST long <- int // | \--* LCL_VAR int V03 loc2 // \--* CNS_INT long 16 Fseq[#FirstElem] // // The COMMA op2 expression is the array index expression (or SIMD/Span expression). 
If we've got // a "LCL_VAR int" index and "ARR_LENGTH(LCL_VAR ref)", that's good enough for us: we'll assume // op2 is an array index expression. We don't need to match it just to ensure the index var is // used as an index expression, or array base var is used as the array base. This saves us from parsing // all the forms that morph can create, especially for arrays of structs. // bool Compiler::optExtractArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum) { if (tree->gtOper != GT_COMMA) { return false; } GenTree* before = tree->gtGetOp1(); if (!before->OperIs(GT_BOUNDS_CHECK)) { return false; } GenTreeBoundsChk* arrBndsChk = before->AsBoundsChk(); if (arrBndsChk->GetIndex()->gtOper != GT_LCL_VAR) { return false; } // For span we may see the array length is a local var or local field or constant. // We won't try and extract those. if (arrBndsChk->GetArrayLength()->OperIs(GT_LCL_VAR, GT_LCL_FLD, GT_CNS_INT)) { return false; } if (arrBndsChk->GetArrayLength()->gtGetOp1()->gtOper != GT_LCL_VAR) { return false; } unsigned arrLcl = arrBndsChk->GetArrayLength()->gtGetOp1()->AsLclVarCommon()->GetLclNum(); if (lhsNum != BAD_VAR_NUM && arrLcl != lhsNum) { return false; } unsigned indLcl = arrBndsChk->GetIndex()->AsLclVarCommon()->GetLclNum(); if (lhsNum == BAD_VAR_NUM) { result->arrLcl = arrLcl; } result->indLcls.Push(indLcl); result->bndsChks.Push(tree); result->useBlock = compCurBB; result->rank++; return true; } //--------------------------------------------------------------------------------------------------------------- // optReconstructArrIndex: Reconstruct array index. // // Arguments: // tree the tree to be checked if it is an array [][][] operation. // result OUT: the extracted GT_INDEX information. // lhsNum for the root level (function is recursive) callers should pass BAD_VAR_NUM. // // Return Value: // Returns true if array index can be extracted, else, return false. "rank" field in // "result" contains the array access depth. The "indLcls" fields contain the indices. // // Operation: // Recursively look for a list of array indices. For example, if the tree is // V03 = (V05 = V00[V01]), V05[V02] // that corresponds to access of V00[V01][V02]. The return value would then be: // ArrIndex result { arrLcl: V00, indLcls: [V01, V02], rank: 2 } // // Note that the array expression is implied by the array bounds check under the COMMA, and the array bounds // checks is what is parsed from the morphed tree; the array addressing expression is not parsed. // // Assumption: // The method extracts only if the array base and indices are GT_LCL_VAR. // bool Compiler::optReconstructArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum) { // If we can extract "tree" (which is a top level comma) return. if (optExtractArrIndex(tree, result, lhsNum)) { return true; } // We have a comma (check if array base expr is computed in "before"), descend further. else if (tree->OperGet() == GT_COMMA) { GenTree* before = tree->gtGetOp1(); // "before" should evaluate an array base for the "after" indexing. if (before->OperGet() != GT_ASG) { return false; } GenTree* lhs = before->gtGetOp1(); GenTree* rhs = before->gtGetOp2(); // "rhs" should contain an GT_INDEX if (!lhs->IsLocal() || !optReconstructArrIndex(rhs, result, lhsNum)) { return false; } unsigned lhsNum = lhs->AsLclVarCommon()->GetLclNum(); GenTree* after = tree->gtGetOp2(); // Pass the "lhsNum", so we can verify if indeed it is used as the array base. 
return optExtractArrIndex(after, result, lhsNum); } return false; } //---------------------------------------------------------------------------------------------- // optCanOptimizeByLoopCloning: Check if the tree can be optimized by loop cloning and if so, // identify as potential candidate and update the loop context. // // Arguments: // tree The tree encountered during the tree walk. // info Supplies information about the current block or stmt in which the tree is. // Also supplies the "context" pointer for updating with loop cloning // candidates. Also supplies loopNum. // // Operation: // If array index can be reconstructed, check if the iteration var of the loop matches the // array index var in some dimension. Also ensure other index vars before the identified // dimension are loop invariant. // // Return Value: // Skip sub trees if the optimization candidate is identified or else continue walking // Compiler::fgWalkResult Compiler::optCanOptimizeByLoopCloning(GenTree* tree, LoopCloneVisitorInfo* info) { ArrIndex arrIndex(getAllocator(CMK_LoopClone)); // Check if array index can be optimized. if (optReconstructArrIndex(tree, &arrIndex, BAD_VAR_NUM)) { assert(tree->gtOper == GT_COMMA); #ifdef DEBUG if (verbose) { printf("Found ArrIndex at " FMT_BB " " FMT_STMT " tree ", arrIndex.useBlock->bbNum, info->stmt->GetID()); printTreeID(tree); printf(" which is equivalent to: "); arrIndex.Print(); printf(", bounds check nodes: "); arrIndex.PrintBoundsCheckNodes(); printf("\n"); } #endif // Check that the array object local variable is invariant within the loop body. if (!optIsStackLocalInvariant(info->loopNum, arrIndex.arrLcl)) { JITDUMP("V%02d is not loop invariant\n", arrIndex.arrLcl); return WALK_SKIP_SUBTREES; } // Walk the dimensions and see if iterVar of the loop is used as index. for (unsigned dim = 0; dim < arrIndex.rank; ++dim) { // Is index variable also used as the loop iter var? if (arrIndex.indLcls[dim] == optLoopTable[info->loopNum].lpIterVar()) { // Check the previous indices are all loop invariant. for (unsigned dim2 = 0; dim2 < dim; ++dim2) { if (optIsVarAssgLoop(info->loopNum, arrIndex.indLcls[dim2])) { JITDUMP("V%02d is assigned in loop\n", arrIndex.indLcls[dim2]); return WALK_SKIP_SUBTREES; } } #ifdef DEBUG if (verbose) { printf("Loop " FMT_LP " can be cloned for ArrIndex ", info->loopNum); arrIndex.Print(); printf(" on dim %d\n", dim); } #endif // Update the loop context. info->context->EnsureLoopOptInfo(info->loopNum) ->Push(new (this, CMK_LoopOpt) LcJaggedArrayOptInfo(arrIndex, dim, info->stmt)); } else { JITDUMP("Induction V%02d is not used as index on dim %d\n", optLoopTable[info->loopNum].lpIterVar(), dim); } } return WALK_SKIP_SUBTREES; } else if (tree->gtOper == GT_ARR_ELEM) { // TODO-CQ: CLONE: Implement. return WALK_SKIP_SUBTREES; } return WALK_CONTINUE; } /* static */ Compiler::fgWalkResult Compiler::optCanOptimizeByLoopCloningVisitor(GenTree** pTree, Compiler::fgWalkData* data) { return data->compiler->optCanOptimizeByLoopCloning(*pTree, (LoopCloneVisitorInfo*)data->pCallbackData); } //------------------------------------------------------------------------ // optIdentifyLoopOptInfo: Identify loop optimization candidates. // Also, check if the loop is suitable for the optimizations performed. // // Arguments: // loopNum - the current loop index for which conditions are derived. // context - data structure where all loop cloning candidates will be updated. 
// // Return Value: // If the loop is not suitable for the optimizations, return false - context // should not contain any optimization candidate for the loop if false. // Else return true. // // Operation: // Check if the loop is well formed for this optimization and identify the // optimization candidates and update the "context" parameter with all the // contextual information necessary to perform the optimization later. // bool Compiler::optIdentifyLoopOptInfo(unsigned loopNum, LoopCloneContext* context) { JITDUMP("Checking loop " FMT_LP " for optimization candidates\n", loopNum); const LoopDsc& loop = optLoopTable[loopNum]; LoopCloneVisitorInfo info(context, loopNum, nullptr); for (BasicBlock* const block : loop.LoopBlocks()) { compCurBB = block; for (Statement* const stmt : block->Statements()) { info.stmt = stmt; const bool lclVarsOnly = false; const bool computeStack = false; fgWalkTreePre(stmt->GetRootNodePointer(), optCanOptimizeByLoopCloningVisitor, &info, lclVarsOnly, computeStack); } } return true; } //------------------------------------------------------------------------------ // optObtainLoopCloningOpts: Identify optimization candidates and update // the "context" for array optimizations. // // Arguments: // context - data structure where all loop cloning info is kept. The // optInfo fields of the context are updated with the // identified optimization candidates. // // Returns: // true if there are any clonable loops. // bool Compiler::optObtainLoopCloningOpts(LoopCloneContext* context) { bool result = false; for (unsigned i = 0; i < optLoopCount; i++) { JITDUMP("Considering loop " FMT_LP " to clone for optimizations.\n", i); if (optIsLoopClonable(i)) { if (optIdentifyLoopOptInfo(i, context)) { result = true; } } JITDUMP("------------------------------------------------------------\n"); } JITDUMP("\n"); return result; } //---------------------------------------------------------------------------- // optLoopCloningEnabled: Determine whether loop cloning is allowed. It is allowed // in release builds. For debug builds, use the value of the COMPlus_JitCloneLoops // flag (which defaults to 1, or allowed). // // Return Value: // true if loop cloning is allowed, false if disallowed. // bool Compiler::optLoopCloningEnabled() { #ifdef DEBUG return JitConfig.JitCloneLoops() != 0; #else return true; #endif } //------------------------------------------------------------------------ // optCloneLoops: Implements loop cloning optimization. // // Identify loop cloning opportunities, derive loop cloning conditions, // perform loop cloning, use the derived conditions to choose which // path to take. // // Returns: // suitable phase status // PhaseStatus Compiler::optCloneLoops() { JITDUMP("\n*************** In optCloneLoops()\n"); if (optLoopCount == 0) { JITDUMP(" No loops to clone\n"); return PhaseStatus::MODIFIED_NOTHING; } if (!optLoopCloningEnabled()) { JITDUMP(" Loop cloning disabled\n"); return PhaseStatus::MODIFIED_NOTHING; } #ifdef DEBUG if (verbose) { printf("\nBefore loop cloning:\n"); fgDispBasicBlocks(/*dumpTrees*/ true); } #endif LoopCloneContext context(optLoopCount, getAllocator(CMK_LoopClone)); // Obtain array optimization candidates in the context. 
if (!optObtainLoopCloningOpts(&context)) { JITDUMP(" No clonable loops\n"); // TODO: if we can verify that the IR was not modified, we can return PhaseStatus::MODIFIED_NOTHING return PhaseStatus::MODIFIED_EVERYTHING; } unsigned optStaticallyOptimizedLoops = 0; // For each loop, derive cloning conditions for the optimization candidates. for (unsigned i = 0; i < optLoopCount; ++i) { JitExpandArrayStack<LcOptInfo*>* optInfos = context.GetLoopOptInfo(i); if (optInfos == nullptr) { continue; } if (!optDeriveLoopCloningConditions(i, &context) || !optComputeDerefConditions(i, &context)) { JITDUMP("> Conditions could not be obtained\n"); context.CancelLoopOptInfo(i); } else { bool allTrue = false; bool anyFalse = false; context.EvaluateConditions(i, &allTrue, &anyFalse DEBUGARG(verbose)); if (anyFalse) { context.CancelLoopOptInfo(i); } else if (allTrue) { // Perform static optimizations on the fast path since we always // have to take the cloned path. optPerformStaticOptimizations(i, &context DEBUGARG(false)); ++optStaticallyOptimizedLoops; // No need to clone. context.CancelLoopOptInfo(i); } } } #if 0 // The code in this #if has been useful in debugging loop cloning issues, by // enabling selective enablement of the loop cloning optimization according to // method hash. #ifdef DEBUG unsigned methHash = info.compMethodHash(); char* lostr = getenv("loopclonehashlo"); unsigned methHashLo = 0; if (lostr != NULL) { sscanf_s(lostr, "%x", &methHashLo); // methHashLo = (unsigned(atoi(lostr)) << 2); // So we don't have to use negative numbers. } char* histr = getenv("loopclonehashhi"); unsigned methHashHi = UINT32_MAX; if (histr != NULL) { sscanf_s(histr, "%x", &methHashHi); // methHashHi = (unsigned(atoi(histr)) << 2); // So we don't have to use negative numbers. } if (methHash < methHashLo || methHash > methHashHi) { return PhaseStatus::MODIFIED_EVERYTHING; } #endif #endif assert(optLoopsCloned == 0); // It should be initialized, but not yet changed. for (unsigned i = 0; i < optLoopCount; ++i) { if (context.GetLoopOptInfo(i) != nullptr) { optLoopsCloned++; context.OptimizeConditions(i DEBUGARG(verbose)); context.OptimizeBlockConditions(i DEBUGARG(verbose)); optCloneLoop(i, &context); } } if (optLoopsCloned > 0) { JITDUMP("Recompute reachability and dominators after loop cloning\n"); constexpr bool computePreds = false; // TODO: recompute the loop table, to include the slow loop path in the table? fgUpdateChangedFlowGraph(computePreds); } #ifdef DEBUG if (verbose) { printf("Loops cloned: %d\n", optLoopsCloned); printf("Loops statically optimized: %d\n", optStaticallyOptimizedLoops); printf("After loop cloning:\n"); fgDispBasicBlocks(/*dumpTrees*/ true); } fgDebugCheckLoopTable(); #endif return PhaseStatus::MODIFIED_EVERYTHING; }
1
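To make the fast-path/slow-path structure built by optCloneLoop easier to picture, here is a minimal, hand-written C++ sketch of the equivalent source-level transformation for the canonical "for (i = 0; i < n; i++) { sum += a[i]; }" loop. It is illustrative only: the JIT performs this on basic blocks and IR, not on source, and the names used here (SumCloned, kIndexOutOfRange) are hypothetical. The (n >= 0) and (n <= length) terms correspond to the cloning conditions pushed by optDeriveLoopCloningConditions, and the (a != nullptr) term to the deref conditions computed by optComputeDerefConditions.

#include <cstddef>

// Stand-in for the IndexOutOfRangeException the real slow path would throw.
static constexpr int kIndexOutOfRange = -1;

static int SumCloned(const int* a, std::size_t length, int n)
{
    int sum = 0;

    // Loop cloning choice conditions, all checked before either loop runs:
    //   (a != null) && (n >= 0) && (n <= a.Length)
    if ((a != nullptr) && (n >= 0) && (static_cast<std::size_t>(n) <= length))
    {
        // Fast path: the per-iteration bounds check has been removed.
        for (int i = 0; i < n; i++)
        {
            sum += a[i];
        }
    }
    else
    {
        // Slow path: the original loop, with the range check kept on every iteration.
        for (int i = 0; i < n; i++)
        {
            if (static_cast<std::size_t>(i) >= length)
            {
                return kIndexOutOfRange;
            }
            sum += a[i];
        }
    }
    return sum;
}

In the real implementation the conditions are emitted as IR into the choice blocks created by optInsertLoopChoiceConditions, with deref conditions split across separate blocks to get short-circuit evaluation.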
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/coreclr/jit/loopcloning.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Loop Cloning XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX Loop cloning is an optimization which duplicates a loop to create two versions. One copy is optimized by hoisting out various dynamic checks, such as array bounds checks that can't be statically eliminated. The checks are dynamically run. If they fail, the original copy of the loop is executed. If they pass, the optimized copy of the loop is executed, knowing that the bounds checks are dynamically unnecessary. The optimization can reduce the amount of code executed within a loop body. For example: public static int f(int[] a, int l) { int sum = 0; for (int i = 0; i < l; i++) { sum += a[i]; // This array bounds check must be executed in the loop } } This can be transformed to (in pseudo-code): public static int f(int[] a, int l) { int sum = 0; if (a != null && l <= a.Length) { for (int i = 0; i < l; i++) { sum += a[i]; // no bounds check needed } } else { for (int i = 0; i < l; i++) { // bounds check needed. We need to do the normal computation (esp., side effects) before the exception occurs. sum += a[i]; } } } One generalization of this is "loop unswitching". Because code is duplicated, this is a code size expanding optimization, and therefore we need to be careful to avoid duplicating too much code unnecessarily. Also, there is a risk that we can duplicate the loops and later, downstream phases optimize away the bounds checks even on the un-optimized copy of the loop. Loop cloning is implemented with the following steps: 1. Loop detection logic, which is existing logic in the JIT that records loop information with loop flags. 2. Identify loop optimization candidates. This is done by optObtainLoopCloningOpts. The loop context variable is updated with all the necessary information (for example: block, stmt, tree information) to do the optimization later. a) This involves checking if the loop is well-formed with respect to the optimization being performed. b) In array bounds check case, reconstructing the morphed GT_INDEX nodes back to their array representation. i) The array index is stored in the "context" variable with additional block, tree, stmt info. 3. Once the optimization candidates are identified, we derive cloning conditions. For example: to clone a simple "for (i=0; i<n; ++i) { a[i] }" loop, we need the following conditions: (a != null) && (n >= 0) && (n <= a.length) && (stride > 0) Note that "&&" implies a short-circuiting operator. This requires each condition to be in its own block with its own comparison and branch instruction. This can be optimized if there are no dependent conditions in a block by using a bitwise AND instead of a short-circuit AND. The (a != null) condition needs to occur before "a.length" is checked. But otherwise, the last three conditions can be computed in the same block, as: (a != null) && ((n >= 0) & (n <= a.length) & (stride > 0)) Since we're optimizing for the expected fast path case, where all the conditions are true, we expect all the conditions to be executed most of the time. 
Thus, it is advantageous to make as many as possible non-short-circuiting to reduce the number of compare/branch/blocks needed. In the above case, stride == 1, so we statically know stride > 0. If we had "for (i=0; i<=n; ++i) { a[i] }", we would need: (a != null) && (n >= 0) && (a.length >= 1) && (n <= a.length - 1) && (stride > 0) This is more complicated. The loop is equivalent (except for possible overflow) to: for (i=0; i<n+1; ++i) { a[i] }" (`n+1` due to the `++i` stride). We'd have to worry about overflow doing this conversion, though. REVIEW: why do we need the (n >= 0) condition? We do need to know "array index var initialization value >= array lower bound (0)". a) Conditions that need to be in their own blocks to enable short-circuit are called block conditions or deref-conditions. i) For a doubly nested loop on i, j, we would then have conditions like (a != null) && (i < a.len) && (a[i] != null) && (j < a[i].len) all short-circuiting creating blocks. Advantage: All conditions are checked before we enter the fast path. So fast path gets as fast as it can be. Disadvantage: Creation of blocks. Heuristic: Therefore we will not clone if we exceed creating 4 blocks. Note: this means we never clone more than 2-dimension a[i][j] expressions (see optComputeDerefConditions()). REVIEW: make this heuristic defined by a COMPlus variable, for easier experimentation, and make it more dynamic and based on potential benefit? b) The other conditions called cloning conditions are transformed into LC_Condition structs which are then optimized. i) Optimization of conditions involves removing redundant condition checks. ii) If some conditions evaluate to true statically, then they are removed. iii) If any condition evaluates to false statically, then loop cloning is aborted for that loop. 4. Then the block splitting occurs and loop cloning conditions are transformed into GenTree and added to the loop cloning choice block (the block that determines which copy of the loop is executed). Preconditions 1. Loop detection has completed and the loop table is populated. 2. The loops that will be considered are the ones with the LPFLG_ITER flag: "for (i = icon or lclVar; test_condition(); i++)" Limitations 1. For array based optimizations the loop choice condition is checked before the loop body. This implies that the loop initializer statement has not executed at the time of the check. So any loop cloning condition involving the initial value of the loop counter cannot be condition checked as it hasn't been assigned yet at the time of condition checking. Therefore the initial value has to be statically known. This can be fixed with further effort. 2. Loops containing nested exception handling regions are not cloned. (Cloning them would require creating new exception handling regions for the cloned loop, which is "hard".) There are a few other EH-related edge conditions that also cause us to reject cloning. 3. If the loop contains RETURN blocks, and cloning those would push us over the maximum number of allowed RETURN blocks in the function (either due to GC info encoding limitations or otherwise), we reject cloning. 4. Loop increment must be `i += 1` 5. Loop test must be `i < x` where `x` is a constant, a variable, or `a.Length` for array `a` (There is some implementation support for decrementing loops, but it is incomplete. There is some implementation support for `i <= x` conditions, but it is incomplete (Compiler::optDeriveLoopCloningConditions() only handles GT_LT conditions)) 6. 
Loop must have been converted to a do-while form. 7. There are a few other loop well-formedness conditions. 8. Multi-dimensional (non-jagged) loop index checking is only partially implemented. 9. Constant initializations and constant limits must be non-negative (REVIEW: why? The implementation does use `unsigned` to represent them.) 10. The cloned loop (the slow path) is not added to the loop table, meaning certain downstream optimization passes do not see them. See https://github.com/dotnet/runtime/issues/43713. Assumptions 1. The assumption is that the optimization candidates collected during the identification phase will be the ones that will be optimized. In other words, the loop that is present originally will be the fast path. The cloned path will be the slow path and will be unoptimized. This allows us to collect additional information at the same time as identifying the optimization candidates. This later helps us to perform the optimizations during actual cloning. 2. All loop cloning choice conditions will automatically be "AND"-ed. These are bitwise AND operations. 3. Perform short circuit AND for (array != null) side effect check before hoisting (limit <= a.length) check. */ #pragma once class Compiler; /** * * Represents an array access and associated bounds checks. * Array access is required to have the array and indices in local variables. * This struct is constructed using a GT_INDEX node that is broken into * its sub trees. * */ struct ArrIndex { unsigned arrLcl; // The array base local num JitExpandArrayStack<unsigned> indLcls; // The indices local nums JitExpandArrayStack<GenTree*> bndsChks; // The bounds checks nodes along each dimension. unsigned rank; // Rank of the array BasicBlock* useBlock; // Block where the [] occurs ArrIndex(CompAllocator alloc) : arrLcl(BAD_VAR_NUM), indLcls(alloc), bndsChks(alloc), rank(0), useBlock(nullptr) { } #ifdef DEBUG void Print(unsigned dim = -1); void PrintBoundsCheckNodes(unsigned dim = -1); #endif }; // Forward declarations #define LC_OPT(en) struct en##OptInfo; #include "loopcloningopts.h" /** * * LcOptInfo represents the optimization information for loop cloning, * other classes are supposed to derive from this base class. * * Example usage: * * LcMdArrayOptInfo is multi-dimensional array optimization for which the * loop can be cloned. * * LcArrIndexOptInfo is a jagged array optimization for which the loop * can be cloned. * * So LcOptInfo represents any type of optimization opportunity that * occurs in a loop and the metadata for the optimization is stored in * this class. */ struct LcOptInfo { enum OptType { #define LC_OPT(en) en, #include "loopcloningopts.h" }; OptType optType; LcOptInfo(OptType optType) : optType(optType) { } OptType GetOptType() { return optType; } #define LC_OPT(en) \ en##OptInfo* As##en##OptInfo() \ { \ assert(optType == en); \ return reinterpret_cast<en##OptInfo*>(this); \ } #include "loopcloningopts.h" }; /** * * Optimization info for a multi-dimensional array. */ struct LcMdArrayOptInfo : public LcOptInfo { GenTreeArrElem* arrElem; // "arrElem" node of an MD array. unsigned dim; // "dim" represents up to what level of the rank this optimization applies to. // For example, a[i,j,k] could be the MD array "arrElem" but if "dim" is 2, // then this node is treated as though it were a[i,j] ArrIndex* index; // "index" cached computation in the form of an ArrIndex representation. 
LcMdArrayOptInfo(GenTreeArrElem* arrElem, unsigned dim) : LcOptInfo(LcMdArray), arrElem(arrElem), dim(dim), index(nullptr) { } ArrIndex* GetArrIndexForDim(CompAllocator alloc) { if (index == nullptr) { index = new (alloc) ArrIndex(alloc); index->rank = arrElem->gtArrRank; for (unsigned i = 0; i < dim; ++i) { index->indLcls.Push(arrElem->gtArrInds[i]->AsLclVarCommon()->GetLclNum()); } index->arrLcl = arrElem->gtArrObj->AsLclVarCommon()->GetLclNum(); } return index; } }; /** * * Optimization info for a jagged array. */ struct LcJaggedArrayOptInfo : public LcOptInfo { unsigned dim; // "dim" represents up to what level of the rank this optimization applies to. // For example, a[i][j][k] could be the jagged array but if "dim" is 2, // then this node is treated as though it were a[i][j] ArrIndex arrIndex; // ArrIndex representation of the array. Statement* stmt; // "stmt" where the optimization opportunity occurs. LcJaggedArrayOptInfo(ArrIndex& arrIndex, unsigned dim, Statement* stmt) : LcOptInfo(LcJaggedArray), dim(dim), arrIndex(arrIndex), stmt(stmt) { } }; /** * * Symbolic representation of a.length, or a[i][j].length or a[i,j].length and so on. * OperType decides whether "arrLength" is invoked on the array or if it is just an array. */ struct LC_Array { enum ArrType { Invalid, Jagged, MdArray }; enum OperType { None, ArrLen, }; ArrType type; // The type of the array on which to invoke length operator. ArrIndex* arrIndex; // ArrIndex representation of this array. OperType oper; #ifdef DEBUG void Print() { arrIndex->Print(dim); if (oper == ArrLen) { printf(".Length"); } } #endif int dim; // "dim" = which index to invoke arrLen on, if -1 invoke on the whole array // Example 1: a[0][1][2] and dim = 2 implies a[0][1].length // Example 2: a[0][1][2] and dim = -1 implies a[0][1][2].length LC_Array() : type(Invalid), dim(-1) { } LC_Array(ArrType type, ArrIndex* arrIndex, int dim, OperType oper) : type(type), arrIndex(arrIndex), oper(oper), dim(dim) { } LC_Array(ArrType type, ArrIndex* arrIndex, OperType oper) : type(type), arrIndex(arrIndex), oper(oper), dim(-1) { } // Equality operator bool operator==(const LC_Array& that) const { assert(type != Invalid && that.type != Invalid); // Types match and the array base matches. if (type != that.type || arrIndex->arrLcl != that.arrIndex->arrLcl || oper != that.oper) { return false; } // If the dim ranks are not matching, quit. int rank1 = GetDimRank(); int rank2 = that.GetDimRank(); if (rank1 != rank2) { return false; } // Check for the indices. for (int i = 0; i < rank1; ++i) { if (arrIndex->indLcls[i] != that.arrIndex->indLcls[i]) { return false; } } return true; } // The max dim on which length is invoked. int GetDimRank() const { return (dim < 0) ? (int)arrIndex->rank : dim; } // Get a tree representation for this symbolic a.length GenTree* ToGenTree(Compiler* comp, BasicBlock* bb); }; /** * * Symbolic representation of either a constant like 1 or 2, or a variable like V02 or V03, or an "LC_Array", * or the null constant. 
*/ struct LC_Ident { enum IdentType { Invalid, Const, Var, ArrLen, Null, }; LC_Array arrLen; // The LC_Array if the type is "ArrLen" unsigned constant; // The constant value if this node is of type "Const", or the lcl num if "Var" IdentType type; // The type of this object // Equality operator bool operator==(const LC_Ident& that) const { switch (type) { case Const: case Var: return (type == that.type) && (constant == that.constant); case ArrLen: return (type == that.type) && (arrLen == that.arrLen); case Null: return (type == that.type); default: assert(!"Unknown LC_Ident type"); unreached(); } } #ifdef DEBUG void Print() { switch (type) { case Const: printf("%u", constant); break; case Var: printf("V%02d", constant); break; case ArrLen: arrLen.Print(); break; case Null: printf("null"); break; default: printf("INVALID"); break; } } #endif LC_Ident() : type(Invalid) { } LC_Ident(unsigned constant, IdentType type) : constant(constant), type(type) { } explicit LC_Ident(IdentType type) : type(type) { } explicit LC_Ident(const LC_Array& arrLen) : arrLen(arrLen), type(ArrLen) { } // Convert this symbolic representation into a tree node. GenTree* ToGenTree(Compiler* comp, BasicBlock* bb); }; /** * * Symbolic representation of an expr that involves an "LC_Ident" */ struct LC_Expr { enum ExprType { Invalid, Ident, }; LC_Ident ident; ExprType type; // Equality operator bool operator==(const LC_Expr& that) const { assert(type != Invalid && that.type != Invalid); // If the types don't match quit. if (type != that.type) { return false; } // Check if the ident match. return (ident == that.ident); } #ifdef DEBUG void Print() { if (type == Ident) { ident.Print(); } else { printf("INVALID"); } } #endif LC_Expr() : type(Invalid) { } explicit LC_Expr(const LC_Ident& ident) : ident(ident), type(Ident) { } // Convert LC_Expr into a tree node. GenTree* ToGenTree(Compiler* comp, BasicBlock* bb); }; /** * * Symbolic representation of a conditional operation involving two "LC_Expr": * LC_Expr < LC_Expr, for example: i > 0, i < a.length */ struct LC_Condition { LC_Expr op1; LC_Expr op2; genTreeOps oper; #ifdef DEBUG void Print() { op1.Print(); printf(" %s ", GenTree::OpName(oper)); op2.Print(); } #endif // Check if the condition evaluates statically to true or false, i < i => false, a.length > 0 => true // The result is put in "pResult" parameter and is valid if the method returns "true". Otherwise, the // condition could not be evaluated. bool Evaluates(bool* pResult); // Check if two conditions can be combined to yield one condition. bool Combines(const LC_Condition& cond, LC_Condition* newCond); LC_Condition() { } LC_Condition(genTreeOps oper, const LC_Expr& op1, const LC_Expr& op2) : op1(op1), op2(op2), oper(oper) { } // Convert this conditional operation into a GenTree. GenTree* ToGenTree(Compiler* comp, BasicBlock* bb, bool invert); }; /** * A deref tree of an array expression. 
* a[i][j][k], b[i] and a[i][y][k] are the occurrences in the loop, then, the tree would be: * a => { * i => { * j => { * k => {} * }, * y => { * k => {} * }, * } * }, * b => { * i => {} * } */ struct LC_Deref { const LC_Array array; JitExpandArrayStack<LC_Deref*>* children; unsigned level; LC_Deref(const LC_Array& array, unsigned level) : array(array), children(nullptr), level(level) { } LC_Deref* Find(unsigned lcl); unsigned Lcl(); bool HasChildren(); void EnsureChildren(CompAllocator alloc); static LC_Deref* Find(JitExpandArrayStack<LC_Deref*>* children, unsigned lcl); void DeriveLevelConditions(JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* len); #ifdef DEBUG void Print(unsigned indent = 0) { unsigned tab = 4 * indent; printf("%*sV%02d, level %d => {", tab, "", Lcl(), level); if (children != nullptr) { for (unsigned i = 0; i < children->Size(); ++i) { if (i > 0) { printf(","); } printf("\n"); #ifdef _MSC_VER (*children)[i]->Print(indent + 1); #else // _MSC_VER (*((JitExpandArray<LC_Deref*>*)children))[i]->Print(indent + 1); #endif // _MSC_VER } } printf("\n%*s}", tab, ""); } #endif }; /** * * The "context" represents data that is used for making loop-cloning decisions. * - The data is the collection of optimization opportunities * - and the conditions (LC_Condition) that decide between the fast * path or the slow path. * * BNF for LC_Condition: * LC_Condition : LC_Expr genTreeOps LC_Expr * LC_Expr : LC_Ident | LC_Ident + Constant * LC_Ident : Constant | Var | LC_Array * LC_Array : . * genTreeOps : GT_GE | GT_LE | GT_GT | GT_LT * */ struct LoopCloneContext { CompAllocator alloc; // The allocator // The array of optimization opportunities found in each loop. (loop x optimization-opportunities) jitstd::vector<JitExpandArrayStack<LcOptInfo*>*> optInfo; // The array of conditions that influence which path to take for each loop. (loop x cloning-conditions) jitstd::vector<JitExpandArrayStack<LC_Condition>*> conditions; // The array of dereference conditions found in each loop. (loop x deref-conditions) jitstd::vector<JitExpandArrayStack<LC_Array>*> derefs; // The array of block levels of conditions for each loop. (loop x level x conditions) jitstd::vector<JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>*> blockConditions; LoopCloneContext(unsigned loopCount, CompAllocator alloc) : alloc(alloc), optInfo(alloc), conditions(alloc), derefs(alloc), blockConditions(alloc) { optInfo.resize(loopCount, nullptr); conditions.resize(loopCount, nullptr); derefs.resize(loopCount, nullptr); blockConditions.resize(loopCount, nullptr); } // Evaluate conditions into a JTRUE stmt and put it in a new block after `insertAfter`. BasicBlock* CondToStmtInBlock(Compiler* comp, JitExpandArrayStack<LC_Condition>& conds, BasicBlock* slowHead, BasicBlock* insertAfter); // Get all the optimization information for loop "loopNum"; this information is held in "optInfo" array. // If NULL this allocates the optInfo[loopNum] array for "loopNum". JitExpandArrayStack<LcOptInfo*>* EnsureLoopOptInfo(unsigned loopNum); // Get all the optimization information for loop "loopNum"; this information is held in "optInfo" array. // If NULL this does not allocate the optInfo[loopNum] array for "loopNum". JitExpandArrayStack<LcOptInfo*>* GetLoopOptInfo(unsigned loopNum); // Cancel all optimizations for loop "loopNum" by clearing out the "conditions" member if non-null // and setting the optInfo to "null". If "null", then the user of this class is not supposed to // clone this loop. 
void CancelLoopOptInfo(unsigned loopNum); // Get the conditions that decide which loop to take for "loopNum." If NULL allocate an empty array. JitExpandArrayStack<LC_Condition>* EnsureConditions(unsigned loopNum); // Get the conditions for loop. No allocation is performed. JitExpandArrayStack<LC_Condition>* GetConditions(unsigned loopNum); // Ensure that the "deref" conditions array is allocated. JitExpandArrayStack<LC_Array>* EnsureDerefs(unsigned loopNum); // Get block conditions for each loop, no allocation is performed. JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* GetBlockConditions(unsigned loopNum); // Ensure that the block condition is present, if not allocate space. JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* EnsureBlockConditions(unsigned loopNum, unsigned totalBlocks); #ifdef DEBUG // Print the block conditions for the loop. void PrintBlockConditions(unsigned loopNum); void PrintBlockLevelConditions(unsigned level, JitExpandArrayStack<LC_Condition>* levelCond); #endif // Does the loop have block conditions? bool HasBlockConditions(unsigned loopNum); // Evaluate the conditions for "loopNum" and indicate if they are either all true or any of them are false. // // `pAllTrue` and `pAnyFalse` are OUT parameters. // // If `*pAllTrue` is `true`, then all the conditions are statically known to be true. // The caller doesn't need to clone the loop, but it can perform fast path optimizations. // // If `*pAnyFalse` is `true`, then at least one condition is statically known to be false. // The caller needs to abort cloning the loop (neither clone nor fast path optimizations.) // // If neither `*pAllTrue` nor `*pAnyFalse` is true, then the evaluation of some conditions are statically unknown. // // Assumes the conditions involve an AND join operator. void EvaluateConditions(unsigned loopNum, bool* pAllTrue, bool* pAnyFalse DEBUGARG(bool verbose)); private: void OptimizeConditions(JitExpandArrayStack<LC_Condition>& conds); public: // Optimize conditions to remove redundant conditions. void OptimizeConditions(unsigned loopNum DEBUGARG(bool verbose)); void OptimizeBlockConditions(unsigned loopNum DEBUGARG(bool verbose)); #ifdef DEBUG void PrintConditions(unsigned loopNum); #endif };
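To make the condition grouping described above concrete, here is a minimal, hand-written C# sketch of the fast-path/slow-path choice for the simple "for (i = 0; i < n; ++i) { a[i] }" loop. It is not JIT output: the real transformation happens on the JIT's IR, and the method name `Sum` is illustrative. The null check short-circuits before `a.Length` is read; the remaining conditions use the non-short-circuiting `&` so they can share one compare-and-branch block, and the `stride > 0` check is omitted because the stride is the constant 1.

    static int Sum(int[] a, int n)
    {
        int sum = 0;
        if (a != null && ((n >= 0) & (n <= a.Length)))
        {
            for (int i = 0; i < n; i++)
            {
                sum += a[i]; // fast path: the guard guarantees 0 <= i < a.Length
            }
        }
        else
        {
            for (int i = 0; i < n; i++)
            {
                sum += a[i]; // slow path: the original bounds check (and its exception point) is preserved
            }
        }
        return sum;
    }

If the guard fails, the slow path keeps the original bounds checks and side-effect ordering, so an out-of-range access still throws at the same point it would have before cloning.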
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Loop Cloning XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX Loop cloning is an optimization which duplicates a loop to create two versions. One copy is optimized by hoisting out various dynamic checks, such as array bounds checks that can't be statically eliminated. The checks are dynamically run. If they fail, the original copy of the loop is executed. If they pass, the optimized copy of the loop is executed, knowing that the bounds checks are dynamically unnecessary. The optimization can reduce the amount of code executed within a loop body. For example: public static int f(int[] a, int l) { int sum = 0; for (int i = 0; i < l; i++) { sum += a[i]; // This array bounds check must be executed in the loop } } This can be transformed to (in pseudo-code): public static int f(int[] a, int l) { int sum = 0; if (a != null && l <= a.Length) { for (int i = 0; i < l; i++) { sum += a[i]; // no bounds check needed } } else { for (int i = 0; i < l; i++) { // bounds check needed. We need to do the normal computation (esp., side effects) before the exception occurs. sum += a[i]; } } } One generalization of this is "loop unswitching". Because code is duplicated, this is a code size expanding optimization, and therefore we need to be careful to avoid duplicating too much code unnecessarily. Also, there is a risk that we can duplicate the loops and later, downstream phases optimize away the bounds checks even on the un-optimized copy of the loop. Loop cloning is implemented with the following steps: 1. Loop detection logic, which is existing logic in the JIT that records loop information with loop flags. 2. Identify loop optimization candidates. This is done by optObtainLoopCloningOpts. The loop context variable is updated with all the necessary information (for example: block, stmt, tree information) to do the optimization later. a) This involves checking if the loop is well-formed with respect to the optimization being performed. b) In array bounds check case, reconstructing the morphed GT_INDEX nodes back to their array representation. i) The array index is stored in the "context" variable with additional block, tree, stmt info. 3. Once the optimization candidates are identified, we derive cloning conditions. For example: to clone a simple "for (i=0; i<n; ++i) { a[i] }" loop, we need the following conditions: (a != null) && (n >= 0) && (n <= a.length) && (stride > 0) Note that "&&" implies a short-circuiting operator. This requires each condition to be in its own block with its own comparison and branch instruction. This can be optimized if there are no dependent conditions in a block by using a bitwise AND instead of a short-circuit AND. The (a != null) condition needs to occur before "a.length" is checked. But otherwise, the last three conditions can be computed in the same block, as: (a != null) && ((n >= 0) & (n <= a.length) & (stride > 0)) Since we're optimizing for the expected fast path case, where all the conditions are true, we expect all the conditions to be executed most of the time. 
Thus, it is advantageous to make as many as possible non-short-circuiting to reduce the number of compare/branch/blocks needed. In the above case, stride == 1, so we statically know stride > 0. If we had "for (i=0; i<=n; ++i) { a[i] }", we would need: (a != null) && (n >= 0) && (a.length >= 1) && (n <= a.length - 1) && (stride > 0) This is more complicated. The loop is equivalent (except for possible overflow) to: for (i=0; i<n+1; ++i) { a[i] }" (`n+1` due to the `++i` stride). We'd have to worry about overflow doing this conversion, though. REVIEW: why do we need the (n >= 0) condition? We do need to know "array index var initialization value >= array lower bound (0)". a) Conditions that need to be in their own blocks to enable short-circuit are called block conditions or deref-conditions. i) For a doubly nested loop on i, j, we would then have conditions like (a != null) && (i < a.len) && (a[i] != null) && (j < a[i].len) all short-circuiting creating blocks. Advantage: All conditions are checked before we enter the fast path. So fast path gets as fast as it can be. Disadvantage: Creation of blocks. Heuristic: Therefore we will not clone if we exceed creating 4 blocks. Note: this means we never clone more than 2-dimension a[i][j] expressions (see optComputeDerefConditions()). REVIEW: make this heuristic defined by a COMPlus variable, for easier experimentation, and make it more dynamic and based on potential benefit? b) The other conditions called cloning conditions are transformed into LC_Condition structs which are then optimized. i) Optimization of conditions involves removing redundant condition checks. ii) If some conditions evaluate to true statically, then they are removed. iii) If any condition evaluates to false statically, then loop cloning is aborted for that loop. 4. Then the block splitting occurs and loop cloning conditions are transformed into GenTree and added to the loop cloning choice block (the block that determines which copy of the loop is executed). Preconditions 1. Loop detection has completed and the loop table is populated. 2. The loops that will be considered are the ones with the LPFLG_ITER flag: "for ( ; test_condition(); i++)" Limitations 1. Loops containing nested exception handling regions are not cloned. (Cloning them would require creating new exception handling regions for the cloned loop, which is "hard".) There are a few other EH-related edge conditions that also cause us to reject cloning. 2. If the loop contains RETURN blocks, and cloning those would push us over the maximum number of allowed RETURN blocks in the function (either due to GC info encoding limitations or otherwise), we reject cloning. 3. Loop increment must be `i += 1` 4. Loop test must be `i < x` or `i <= x` where `x` is a constant, a variable, or `a.Length` for array `a` (There is some implementation support for decrementing loops, but it is incomplete.) 5. Loop must have been converted to a do-while form. 6. There are a few other loop well-formedness conditions. 7. Multi-dimensional (non-jagged) loop index checking is only partially implemented. 8. Constant initializations and constant limits must be non-negative. This is because the iterator variable will be used as an array index, and array indices must be non-negative. For non-constant (or not found) iterator variable `i` initialization, we add a dynamic check that `i >= 0`. Constant initializations can be checked statically. 9. 
The cloned loop (the slow path) is not added to the loop table, meaning certain downstream optimization passes do not see them. See https://github.com/dotnet/runtime/issues/43713. Assumptions 1. The assumption is that the optimization candidates collected during the identification phase will be the ones that will be optimized. In other words, the loop that is present originally will be the fast path. The cloned path will be the slow path and will be unoptimized. This allows us to collect additional information at the same time as identifying the optimization candidates. This later helps us to perform the optimizations during actual cloning. 2. All loop cloning choice conditions will automatically be "AND"-ed. These are bitwise AND operations. 3. Perform short circuit AND for (array != null) side effect check before hoisting (limit <= a.length) check. */ #pragma once class Compiler; /** * * Represents an array access and associated bounds checks. * Array access is required to have the array and indices in local variables. * This struct is constructed using a GT_INDEX node that is broken into * its sub trees. * */ struct ArrIndex { unsigned arrLcl; // The array base local num JitExpandArrayStack<unsigned> indLcls; // The indices local nums JitExpandArrayStack<GenTree*> bndsChks; // The bounds checks nodes along each dimension. unsigned rank; // Rank of the array BasicBlock* useBlock; // Block where the [] occurs ArrIndex(CompAllocator alloc) : arrLcl(BAD_VAR_NUM), indLcls(alloc), bndsChks(alloc), rank(0), useBlock(nullptr) { } #ifdef DEBUG void Print(unsigned dim = -1); void PrintBoundsCheckNodes(unsigned dim = -1); #endif }; // Forward declarations #define LC_OPT(en) struct en##OptInfo; #include "loopcloningopts.h" /** * * LcOptInfo represents the optimization information for loop cloning, * other classes are supposed to derive from this base class. * * Example usage: * * LcMdArrayOptInfo is multi-dimensional array optimization for which the * loop can be cloned. * * LcArrIndexOptInfo is a jagged array optimization for which the loop * can be cloned. * * So LcOptInfo represents any type of optimization opportunity that * occurs in a loop and the metadata for the optimization is stored in * this class. */ struct LcOptInfo { enum OptType { #define LC_OPT(en) en, #include "loopcloningopts.h" }; OptType optType; LcOptInfo(OptType optType) : optType(optType) { } OptType GetOptType() { return optType; } #define LC_OPT(en) \ en##OptInfo* As##en##OptInfo() \ { \ assert(optType == en); \ return reinterpret_cast<en##OptInfo*>(this); \ } #include "loopcloningopts.h" }; /** * * Optimization info for a multi-dimensional array. */ struct LcMdArrayOptInfo : public LcOptInfo { GenTreeArrElem* arrElem; // "arrElem" node of an MD array. unsigned dim; // "dim" represents up to what level of the rank this optimization applies to. // For example, a[i,j,k] could be the MD array "arrElem" but if "dim" is 2, // then this node is treated as though it were a[i,j] ArrIndex* index; // "index" cached computation in the form of an ArrIndex representation. 
LcMdArrayOptInfo(GenTreeArrElem* arrElem, unsigned dim) : LcOptInfo(LcMdArray), arrElem(arrElem), dim(dim), index(nullptr) { } ArrIndex* GetArrIndexForDim(CompAllocator alloc) { if (index == nullptr) { index = new (alloc) ArrIndex(alloc); index->rank = arrElem->gtArrRank; for (unsigned i = 0; i < dim; ++i) { index->indLcls.Push(arrElem->gtArrInds[i]->AsLclVarCommon()->GetLclNum()); } index->arrLcl = arrElem->gtArrObj->AsLclVarCommon()->GetLclNum(); } return index; } }; /** * * Optimization info for a jagged array. */ struct LcJaggedArrayOptInfo : public LcOptInfo { unsigned dim; // "dim" represents up to what level of the rank this optimization applies to. // For example, a[i][j][k] could be the jagged array but if "dim" is 2, // then this node is treated as though it were a[i][j] ArrIndex arrIndex; // ArrIndex representation of the array. Statement* stmt; // "stmt" where the optimization opportunity occurs. LcJaggedArrayOptInfo(ArrIndex& arrIndex, unsigned dim, Statement* stmt) : LcOptInfo(LcJaggedArray), dim(dim), arrIndex(arrIndex), stmt(stmt) { } }; /** * * Symbolic representation of a.length, or a[i][j].length or a[i,j].length and so on. * OperType decides whether "arrLength" is invoked on the array or if it is just an array. */ struct LC_Array { enum ArrType { Invalid, Jagged, MdArray }; enum OperType { None, ArrLen, }; ArrType type; // The type of the array on which to invoke length operator. ArrIndex* arrIndex; // ArrIndex representation of this array. OperType oper; #ifdef DEBUG void Print() { arrIndex->Print(dim); if (oper == ArrLen) { printf(".Length"); } } #endif int dim; // "dim" = which index to invoke arrLen on, if -1 invoke on the whole array // Example 1: a[0][1][2] and dim = 2 implies a[0][1].length // Example 2: a[0][1][2] and dim = -1 implies a[0][1][2].length LC_Array() : type(Invalid), dim(-1) { } LC_Array(ArrType type, ArrIndex* arrIndex, int dim, OperType oper) : type(type), arrIndex(arrIndex), oper(oper), dim(dim) { } LC_Array(ArrType type, ArrIndex* arrIndex, OperType oper) : type(type), arrIndex(arrIndex), oper(oper), dim(-1) { } // Equality operator bool operator==(const LC_Array& that) const { assert(type != Invalid && that.type != Invalid); // Types match and the array base matches. if (type != that.type || arrIndex->arrLcl != that.arrIndex->arrLcl || oper != that.oper) { return false; } // If the dim ranks are not matching, quit. int rank1 = GetDimRank(); int rank2 = that.GetDimRank(); if (rank1 != rank2) { return false; } // Check for the indices. for (int i = 0; i < rank1; ++i) { if (arrIndex->indLcls[i] != that.arrIndex->indLcls[i]) { return false; } } return true; } // The max dim on which length is invoked. int GetDimRank() const { return (dim < 0) ? (int)arrIndex->rank : dim; } // Get a tree representation for this symbolic a.length GenTree* ToGenTree(Compiler* comp, BasicBlock* bb); }; /** * * Symbolic representation of either a constant like 1 or 2, or a variable like V02 or V03, or an "LC_Array", * or the null constant. 
*/ struct LC_Ident { enum IdentType { Invalid, Const, Var, ArrLen, Null, }; LC_Array arrLen; // The LC_Array if the type is "ArrLen" unsigned constant; // The constant value if this node is of type "Const", or the lcl num if "Var" IdentType type; // The type of this object // Equality operator bool operator==(const LC_Ident& that) const { switch (type) { case Const: case Var: return (type == that.type) && (constant == that.constant); case ArrLen: return (type == that.type) && (arrLen == that.arrLen); case Null: return (type == that.type); default: assert(!"Unknown LC_Ident type"); unreached(); } } #ifdef DEBUG void Print() { switch (type) { case Const: printf("%u", constant); break; case Var: printf("V%02d", constant); break; case ArrLen: arrLen.Print(); break; case Null: printf("null"); break; default: printf("INVALID"); break; } } #endif LC_Ident() : type(Invalid) { } LC_Ident(unsigned constant, IdentType type) : constant(constant), type(type) { } explicit LC_Ident(IdentType type) : type(type) { } explicit LC_Ident(const LC_Array& arrLen) : arrLen(arrLen), type(ArrLen) { } // Convert this symbolic representation into a tree node. GenTree* ToGenTree(Compiler* comp, BasicBlock* bb); }; /** * * Symbolic representation of an expr that involves an "LC_Ident" */ struct LC_Expr { enum ExprType { Invalid, Ident, }; LC_Ident ident; ExprType type; // Equality operator bool operator==(const LC_Expr& that) const { assert(type != Invalid && that.type != Invalid); // If the types don't match quit. if (type != that.type) { return false; } // Check if the ident match. return (ident == that.ident); } #ifdef DEBUG void Print() { if (type == Ident) { ident.Print(); } else { printf("INVALID"); } } #endif LC_Expr() : type(Invalid) { } explicit LC_Expr(const LC_Ident& ident) : ident(ident), type(Ident) { } // Convert LC_Expr into a tree node. GenTree* ToGenTree(Compiler* comp, BasicBlock* bb); }; /** * * Symbolic representation of a conditional operation involving two "LC_Expr": * LC_Expr < LC_Expr, for example: i > 0, i < a.length */ struct LC_Condition { LC_Expr op1; LC_Expr op2; genTreeOps oper; #ifdef DEBUG void Print() { op1.Print(); printf(" %s ", GenTree::OpName(oper)); op2.Print(); } #endif // Check if the condition evaluates statically to true or false, i < i => false, a.length > 0 => true // The result is put in "pResult" parameter and is valid if the method returns "true". Otherwise, the // condition could not be evaluated. bool Evaluates(bool* pResult); // Check if two conditions can be combined to yield one condition. bool Combines(const LC_Condition& cond, LC_Condition* newCond); LC_Condition() { } LC_Condition(genTreeOps oper, const LC_Expr& op1, const LC_Expr& op2) : op1(op1), op2(op2), oper(oper) { } // Convert this conditional operation into a GenTree. GenTree* ToGenTree(Compiler* comp, BasicBlock* bb, bool invert); }; /** * A deref tree of an array expression. 
* a[i][j][k], b[i] and a[i][y][k] are the occurrences in the loop, then, the tree would be: * a => { * i => { * j => { * k => {} * }, * y => { * k => {} * }, * } * }, * b => { * i => {} * } */ struct LC_Deref { const LC_Array array; JitExpandArrayStack<LC_Deref*>* children; unsigned level; LC_Deref(const LC_Array& array, unsigned level) : array(array), children(nullptr), level(level) { } LC_Deref* Find(unsigned lcl); unsigned Lcl(); bool HasChildren(); void EnsureChildren(CompAllocator alloc); static LC_Deref* Find(JitExpandArrayStack<LC_Deref*>* children, unsigned lcl); void DeriveLevelConditions(JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* len); #ifdef DEBUG void Print(unsigned indent = 0) { unsigned tab = 4 * indent; printf("%*sV%02d, level %d => {", tab, "", Lcl(), level); if (children != nullptr) { for (unsigned i = 0; i < children->Size(); ++i) { if (i > 0) { printf(","); } printf("\n"); #ifdef _MSC_VER (*children)[i]->Print(indent + 1); #else // _MSC_VER (*((JitExpandArray<LC_Deref*>*)children))[i]->Print(indent + 1); #endif // _MSC_VER } } printf("\n%*s}", tab, ""); } #endif }; /** * * The "context" represents data that is used for making loop-cloning decisions. * - The data is the collection of optimization opportunities * - and the conditions (LC_Condition) that decide between the fast * path or the slow path. * * BNF for LC_Condition: * LC_Condition : LC_Expr genTreeOps LC_Expr * LC_Expr : LC_Ident | LC_Ident + Constant * LC_Ident : Constant | Var | LC_Array * LC_Array : . * genTreeOps : GT_GE | GT_LE | GT_GT | GT_LT * */ struct LoopCloneContext { CompAllocator alloc; // The allocator // The array of optimization opportunities found in each loop. (loop x optimization-opportunities) jitstd::vector<JitExpandArrayStack<LcOptInfo*>*> optInfo; // The array of conditions that influence which path to take for each loop. (loop x cloning-conditions) jitstd::vector<JitExpandArrayStack<LC_Condition>*> conditions; // The array of dereference conditions found in each loop. (loop x deref-conditions) jitstd::vector<JitExpandArrayStack<LC_Array>*> derefs; // The array of block levels of conditions for each loop. (loop x level x conditions) jitstd::vector<JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>*> blockConditions; LoopCloneContext(unsigned loopCount, CompAllocator alloc) : alloc(alloc), optInfo(alloc), conditions(alloc), derefs(alloc), blockConditions(alloc) { optInfo.resize(loopCount, nullptr); conditions.resize(loopCount, nullptr); derefs.resize(loopCount, nullptr); blockConditions.resize(loopCount, nullptr); } // Evaluate conditions into a JTRUE stmt and put it in a new block after `insertAfter`. BasicBlock* CondToStmtInBlock(Compiler* comp, JitExpandArrayStack<LC_Condition>& conds, BasicBlock* slowHead, BasicBlock* insertAfter); // Get all the optimization information for loop "loopNum"; this information is held in "optInfo" array. // If NULL this allocates the optInfo[loopNum] array for "loopNum". JitExpandArrayStack<LcOptInfo*>* EnsureLoopOptInfo(unsigned loopNum); // Get all the optimization information for loop "loopNum"; this information is held in "optInfo" array. // If NULL this does not allocate the optInfo[loopNum] array for "loopNum". JitExpandArrayStack<LcOptInfo*>* GetLoopOptInfo(unsigned loopNum); // Cancel all optimizations for loop "loopNum" by clearing out the "conditions" member if non-null // and setting the optInfo to "null". If "null", then the user of this class is not supposed to // clone this loop. 
void CancelLoopOptInfo(unsigned loopNum); // Get the conditions that decide which loop to take for "loopNum." If NULL allocate an empty array. JitExpandArrayStack<LC_Condition>* EnsureConditions(unsigned loopNum); // Get the conditions for loop. No allocation is performed. JitExpandArrayStack<LC_Condition>* GetConditions(unsigned loopNum); // Ensure that the "deref" conditions array is allocated. JitExpandArrayStack<LC_Array>* EnsureDerefs(unsigned loopNum); // Get block conditions for each loop, no allocation is performed. JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* GetBlockConditions(unsigned loopNum); // Ensure that the block condition is present, if not allocate space. JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* EnsureBlockConditions(unsigned loopNum, unsigned totalBlocks); #ifdef DEBUG // Print the block conditions for the loop. void PrintBlockConditions(unsigned loopNum); void PrintBlockLevelConditions(unsigned level, JitExpandArrayStack<LC_Condition>* levelCond); #endif // Does the loop have block conditions? bool HasBlockConditions(unsigned loopNum); // Evaluate the conditions for "loopNum" and indicate if they are either all true or any of them are false. // // `pAllTrue` and `pAnyFalse` are OUT parameters. // // If `*pAllTrue` is `true`, then all the conditions are statically known to be true. // The caller doesn't need to clone the loop, but it can perform fast path optimizations. // // If `*pAnyFalse` is `true`, then at least one condition is statically known to be false. // The caller needs to abort cloning the loop (neither clone nor fast path optimizations.) // // If neither `*pAllTrue` nor `*pAnyFalse` is true, then the evaluation of some conditions are statically unknown. // // Assumes the conditions involve an AND join operator. void EvaluateConditions(unsigned loopNum, bool* pAllTrue, bool* pAnyFalse DEBUGARG(bool verbose)); private: void OptimizeConditions(JitExpandArrayStack<LC_Condition>& conds); public: // Optimize conditions to remove redundant conditions. void OptimizeConditions(unsigned loopNum DEBUGARG(bool verbose)); void OptimizeBlockConditions(unsigned loopNum DEBUGARG(bool verbose)); #ifdef DEBUG void PrintConditions(unsigned loopNum); #endif };
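The "i <= n" form discussed in the comments above needs the extra `a.Length - 1` adjustment. The following hand-written C# sketch (again not JIT output; `SumInclusive` is an illustrative name) spells out the listed conditions (a != null) && (n >= 0) && (a.Length >= 1) && (n <= a.Length - 1), dropping the statically-true stride check:

    static int SumInclusive(int[] a, int n)
    {
        int sum = 0;
        if (a != null && ((n >= 0) & (a.Length >= 1) & (n <= a.Length - 1)))
        {
            for (int i = 0; i <= n; i++)
            {
                sum += a[i]; // fast path: 0 <= i <= n <= a.Length - 1
            }
        }
        else
        {
            for (int i = 0; i <= n; i++)
            {
                sum += a[i]; // slow path: bounds checks preserved
            }
        }
        return sum;
    }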
1
dotnet/runtime
66,257
Cloning improvements
Remove the loop cloning variable initialization condition: assume that any pre-existing initialization is acceptable, and check the iteration variable's initial value against zero dynamically if necessary. Const inits remain as before. There are lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., the "first" block concept is gone).
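As a hedged illustration of the behavioral change described here (a sketch based on the description, not code taken from the PR diff), the loop below has a non-constant initial value, which previously blocked cloning because the initial value had to be statically known; with this change the cloning conditions can instead include a dynamic `start >= 0` check. `SumFrom` and `start` are illustrative names.

    static int SumFrom(int[] a, int start, int n)
    {
        int sum = 0;
        if (a != null && ((start >= 0) & (n <= a.Length)))
        {
            for (int i = start; i < n; i++)
            {
                sum += a[i]; // fast path: start >= 0 and i < n <= a.Length
            }
        }
        else
        {
            for (int i = start; i < n; i++)
            {
                sum += a[i]; // slow path: original bounds checks remain
            }
        }
        return sum;
    }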
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/coreclr/jit/optimizer.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Optimizer XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif /*****************************************************************************/ void Compiler::optInit() { optLoopsMarked = false; fgHasLoops = false; loopAlignCandidates = 0; /* Initialize the # of tracked loops to 0 */ optLoopCount = 0; optLoopTable = nullptr; optCurLoopEpoch = 0; #ifdef DEBUG loopsAligned = 0; #endif /* Keep track of the number of calls and indirect calls made by this method */ optCallCount = 0; optIndirectCallCount = 0; optNativeCallCount = 0; optAssertionCount = 0; optAssertionDep = nullptr; optCSEstart = BAD_VAR_NUM; optCSEcount = 0; } DataFlow::DataFlow(Compiler* pCompiler) : m_pCompiler(pCompiler) { } //------------------------------------------------------------------------ // optSetBlockWeights: adjust block weights, as follows: // 1. A block that is not reachable from the entry block is marked "run rarely". // 2. If we're not using profile weights, then any block with a non-zero weight // that doesn't dominate all the return blocks has its weight dropped in half // (but only if the first block *does* dominate all the returns). // // Notes: // Depends on dominators, and fgReturnBlocks being set. // PhaseStatus Compiler::optSetBlockWeights() { noway_assert(opts.OptimizationEnabled()); assert(fgDomsComputed); assert(fgReturnBlocksComputed); #ifdef DEBUG bool changed = false; #endif bool firstBBDominatesAllReturns = true; const bool usingProfileWeights = fgIsUsingProfileWeights(); for (BasicBlock* const block : Blocks()) { /* Blocks that can't be reached via the first block are rarely executed */ if (!fgReachable(fgFirstBB, block)) { block->bbSetRunRarely(); } if (!usingProfileWeights && firstBBDominatesAllReturns) { // If the weight is already zero (and thus rarely run), there's no point scaling it. if (block->bbWeight != BB_ZERO_WEIGHT) { // If the block dominates all return blocks, leave the weight alone. Otherwise, // scale the weight by 0.5 as a heuristic that some other path gets some of the dynamic flow. // Note that `optScaleLoopBlocks` has a similar heuristic for loop blocks that don't dominate // their loop back edge. bool blockDominatesAllReturns = true; // Assume that we will dominate for (BasicBlockList* retBlocks = fgReturnBlocks; retBlocks != nullptr; retBlocks = retBlocks->next) { if (!fgDominate(block, retBlocks->block)) { blockDominatesAllReturns = false; break; } } if (block == fgFirstBB) { firstBBDominatesAllReturns = blockDominatesAllReturns; // Don't scale the weight of the first block, since it is guaranteed to execute. // If the first block does not dominate all the returns, we won't scale any of the function's // block weights. 
} else { // If we are not using profile weight then we lower the weight // of blocks that do not dominate a return block // if (!blockDominatesAllReturns) { INDEBUG(changed = true); // TODO-Cleanup: we should use: // block->scaleBBWeight(0.5); // since we are inheriting "from ourselves", but that leads to asm diffs due to minutely // different floating-point value in the calculation, and some code that compares weights // for equality. block->inheritWeightPercentage(block, 50); } } } } } #if DEBUG if (changed && verbose) { printf("\nAfter optSetBlockWeights:\n"); fgDispBasicBlocks(); printf("\n"); } /* Check that the flowgraph data (bbNum, bbRefs, bbPreds) is up-to-date */ fgDebugCheckBBlist(); #endif return PhaseStatus::MODIFIED_EVERYTHING; } //------------------------------------------------------------------------ // optScaleLoopBlocks: Scale the weight of loop blocks from 'begBlk' to 'endBlk'. // // Arguments: // begBlk - first block of range. Must be marked as a loop head (BBF_LOOP_HEAD). // endBlk - last block of range (inclusive). Must be reachable from `begBlk`. // // Operation: // Calculate the 'loop weight'. This is the amount to scale the weight of each block in the loop. // Our heuristic is that loops are weighted eight times more than straight-line code // (scale factor is BB_LOOP_WEIGHT_SCALE). If the loops are all properly formed this gives us these weights: // // 1 -- non-loop basic block // 8 -- single loop nesting // 64 -- double loop nesting // 512 -- triple loop nesting // void Compiler::optScaleLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk) { noway_assert(begBlk->bbNum <= endBlk->bbNum); noway_assert(begBlk->isLoopHead()); noway_assert(fgReachable(begBlk, endBlk)); noway_assert(!opts.MinOpts()); #ifdef DEBUG if (verbose) { printf("\nMarking a loop from " FMT_BB " to " FMT_BB, begBlk->bbNum, endBlk->bbNum); } #endif // Build list of back edges for block begBlk. flowList* backedgeList = nullptr; for (BasicBlock* const predBlock : begBlk->PredBlocks()) { // Is this a back edge? if (predBlock->bbNum >= begBlk->bbNum) { backedgeList = new (this, CMK_FlowList) flowList(predBlock, backedgeList); #if MEASURE_BLOCK_SIZE genFlowNodeCnt += 1; genFlowNodeSize += sizeof(flowList); #endif // MEASURE_BLOCK_SIZE } } // At least one backedge must have been found (the one from endBlk). noway_assert(backedgeList); auto reportBlockWeight = [&](BasicBlock* blk, const char* message) { #ifdef DEBUG if (verbose) { printf("\n " FMT_BB "(wt=" FMT_WT ")%s", blk->bbNum, blk->getBBWeight(this), message); } #endif // DEBUG }; for (BasicBlock* const curBlk : BasicBlockRangeList(begBlk, endBlk)) { // Don't change the block weight if it came from profile data. if (curBlk->hasProfileWeight()) { reportBlockWeight(curBlk, "; unchanged: has profile weight"); continue; } // Don't change the block weight if it's known to be rarely run. if (curBlk->isRunRarely()) { reportBlockWeight(curBlk, "; unchanged: run rarely"); continue; } // For curBlk to be part of a loop that starts at begBlk, curBlk must be reachable from begBlk and // (since this is a loop) begBlk must likewise be reachable from curBlk. if (fgReachable(curBlk, begBlk) && fgReachable(begBlk, curBlk)) { // If `curBlk` reaches any of the back edge blocks we set `reachable`. // If `curBlk` dominates any of the back edge blocks we set `dominates`. 
bool reachable = false; bool dominates = false; for (flowList* tmp = backedgeList; tmp != nullptr; tmp = tmp->flNext) { BasicBlock* backedge = tmp->getBlock(); reachable |= fgReachable(curBlk, backedge); dominates |= fgDominate(curBlk, backedge); if (dominates && reachable) { // No need to keep looking; we've already found all the info we need. break; } } if (reachable) { // If the block has BB_ZERO_WEIGHT, then it should be marked as rarely run, and skipped, above. noway_assert(curBlk->bbWeight > BB_ZERO_WEIGHT); weight_t scale = BB_LOOP_WEIGHT_SCALE; if (!dominates) { // If `curBlk` reaches but doesn't dominate any back edge to `endBlk` then there must be at least // some other path to `endBlk`, so don't give `curBlk` all the execution weight. scale = scale / 2; } curBlk->scaleBBWeight(scale); reportBlockWeight(curBlk, ""); } else { reportBlockWeight(curBlk, "; unchanged: back edge unreachable"); } } else { reportBlockWeight(curBlk, "; unchanged: block not in loop"); } } } //------------------------------------------------------------------------ // optUnmarkLoopBlocks: Unmark the blocks between 'begBlk' and 'endBlk' as part of a loop. // // Arguments: // begBlk - first block of range. Must be marked as a loop head (BBF_LOOP_HEAD). // endBlk - last block of range (inclusive). Must be reachable from `begBlk`. // // Operation: // A set of blocks that were previously marked as a loop are now to be unmarked, since we have decided that // for some reason this loop no longer exists. Basically we are just resetting the blocks bbWeight to their // previous values. // void Compiler::optUnmarkLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk) { noway_assert(begBlk->bbNum <= endBlk->bbNum); noway_assert(begBlk->isLoopHead()); noway_assert(!opts.MinOpts()); unsigned backEdgeCount = 0; for (BasicBlock* const predBlock : begBlk->PredBlocks()) { // Is this a backward edge? (from predBlock to begBlk) if (begBlk->bbNum > predBlock->bbNum) { continue; } // We only consider back-edges that are BBJ_COND or BBJ_ALWAYS for loops. if (!predBlock->KindIs(BBJ_COND, BBJ_ALWAYS)) { continue; } backEdgeCount++; } // Only unmark the loop blocks if we have exactly one loop back edge. if (backEdgeCount != 1) { #ifdef DEBUG if (verbose) { if (backEdgeCount > 0) { printf("\nNot removing loop at " FMT_BB ", due to an additional back edge", begBlk->bbNum); } else if (backEdgeCount == 0) { printf("\nNot removing loop at " FMT_BB ", due to no back edge", begBlk->bbNum); } } #endif return; } noway_assert(fgReachable(begBlk, endBlk)); #ifdef DEBUG if (verbose) { printf("\nUnmarking a loop from " FMT_BB " to " FMT_BB, begBlk->bbNum, endBlk->bbNum); } #endif for (BasicBlock* const curBlk : BasicBlockRangeList(begBlk, endBlk)) { // Stop if we go past the last block in the loop, as it may have been deleted. if (curBlk->bbNum > endBlk->bbNum) { break; } // Don't change the block weight if it's known to be rarely run. if (curBlk->isRunRarely()) { continue; } // Don't change the block weight if it came from profile data. if (curBlk->hasProfileWeight()) { continue; } // Don't unmark blocks that are maximum weight. if (curBlk->isMaxBBWeight()) { continue; } // For curBlk to be part of a loop that starts at begBlk, curBlk must be reachable from begBlk and // (since this is a loop) begBlk must likewise be reachable from curBlk. 
// if (fgReachable(curBlk, begBlk) && fgReachable(begBlk, curBlk)) { weight_t scale = 1.0 / BB_LOOP_WEIGHT_SCALE; if (!fgDominate(curBlk, endBlk)) { scale *= 2; } curBlk->scaleBBWeight(scale); JITDUMP("\n " FMT_BB "(wt=" FMT_WT ")", curBlk->bbNum, curBlk->getBBWeight(this)); } } JITDUMP("\n"); begBlk->unmarkLoopAlign(this DEBUG_ARG("Removed loop")); } /***************************************************************************************************** * * Function called to update the loop table and bbWeight before removing a block */ void Compiler::optUpdateLoopsBeforeRemoveBlock(BasicBlock* block, bool skipUnmarkLoop) { if (!optLoopsMarked) { return; } noway_assert(!opts.MinOpts()); bool removeLoop = false; // If an unreachable block is a loop entry or bottom then the loop is unreachable. // Special case: the block was the head of a loop - or pointing to a loop entry. for (unsigned loopNum = 0; loopNum < optLoopCount; loopNum++) { LoopDsc& loop = optLoopTable[loopNum]; // Some loops may have been already removed by loop unrolling or conditional folding. if (loop.lpFlags & LPFLG_REMOVED) { continue; } // Avoid printing to the JitDump unless we're actually going to change something. // If we call reportBefore, then we're going to change the loop table, and we should print the // `reportAfter` info as well. Only print the `reportBefore` info once, if multiple changes to // the table are made. INDEBUG(bool reportedBefore = false); auto reportBefore = [&]() { #ifdef DEBUG if (verbose && !reportedBefore) { printf("optUpdateLoopsBeforeRemoveBlock " FMT_BB " Before: ", block->bbNum); optPrintLoopInfo(loopNum); printf("\n"); reportedBefore = true; } #endif // DEBUG }; auto reportAfter = [&]() { #ifdef DEBUG if (verbose && reportedBefore) { printf("optUpdateLoopsBeforeRemoveBlock " FMT_BB " After: ", block->bbNum); optPrintLoopInfo(loopNum); printf("\n"); } #endif // DEBUG }; if (block == loop.lpEntry || block == loop.lpBottom) { reportBefore(); optMarkLoopRemoved(loopNum); reportAfter(); continue; } // If the loop is still in the table any block in the loop must be reachable. noway_assert((loop.lpEntry != block) && (loop.lpBottom != block)); if (loop.lpExit == block) { reportBefore(); assert(loop.lpExitCnt == 1); --loop.lpExitCnt; loop.lpExit = nullptr; } // If `block` flows to the loop entry then the whole loop will become unreachable if it is the // only non-loop predecessor. switch (block->bbJumpKind) { case BBJ_NONE: if (block->bbNext == loop.lpEntry) { removeLoop = true; } break; case BBJ_COND: if ((block->bbNext == loop.lpEntry) || (block->bbJumpDest == loop.lpEntry)) { removeLoop = true; } break; case BBJ_ALWAYS: if (block->bbJumpDest == loop.lpEntry) { removeLoop = true; } break; case BBJ_SWITCH: for (BasicBlock* const bTarget : block->SwitchTargets()) { if (bTarget == loop.lpEntry) { removeLoop = true; break; } } break; default: break; } if (removeLoop) { // Check if the entry has other predecessors outside the loop. // TODO: Replace this when predecessors are available. for (BasicBlock* const auxBlock : Blocks()) { // Ignore blocks in the loop. 
if (loop.lpContains(auxBlock)) { continue; } switch (auxBlock->bbJumpKind) { case BBJ_NONE: if (auxBlock->bbNext == loop.lpEntry) { removeLoop = false; } break; case BBJ_COND: if ((auxBlock->bbNext == loop.lpEntry) || (auxBlock->bbJumpDest == loop.lpEntry)) { removeLoop = false; } break; case BBJ_ALWAYS: if (auxBlock->bbJumpDest == loop.lpEntry) { removeLoop = false; } break; case BBJ_SWITCH: for (BasicBlock* const bTarget : auxBlock->SwitchTargets()) { if (bTarget == loop.lpEntry) { removeLoop = false; break; } } break; default: break; } } if (removeLoop) { reportBefore(); optMarkLoopRemoved(loopNum); } } else if (loop.lpHead == block) { reportBefore(); /* The loop has a new head - Just update the loop table */ loop.lpHead = block->bbPrev; } reportAfter(); } if ((skipUnmarkLoop == false) && // block->KindIs(BBJ_ALWAYS, BBJ_COND) && // block->bbJumpDest->isLoopHead() && // (block->bbJumpDest->bbNum <= block->bbNum) && // fgDomsComputed && // (fgCurBBEpochSize == fgDomBBcount + 1) && // fgReachable(block->bbJumpDest, block)) { optUnmarkLoopBlocks(block->bbJumpDest, block); } } //------------------------------------------------------------------------ // optClearLoopIterInfo: Clear the info related to LPFLG_ITER loops in the loop table. // The various fields related to iterators is known to be valid for loop cloning and unrolling, // but becomes invalid afterwards. Clear the info that might be used incorrectly afterwards // in JitDump or by subsequent phases. // void Compiler::optClearLoopIterInfo() { for (unsigned lnum = 0; lnum < optLoopCount; lnum++) { LoopDsc& loop = optLoopTable[lnum]; loop.lpFlags &= ~(LPFLG_ITER | LPFLG_VAR_INIT | LPFLG_CONST_INIT | LPFLG_SIMD_LIMIT | LPFLG_VAR_LIMIT | LPFLG_CONST_LIMIT | LPFLG_ARRLEN_LIMIT); loop.lpIterTree = nullptr; loop.lpInitBlock = nullptr; loop.lpConstInit = -1; // union with loop.lpVarInit loop.lpTestTree = nullptr; } } #ifdef DEBUG /***************************************************************************** * * Print loop info in an uniform way. */ void Compiler::optPrintLoopInfo(const LoopDsc* loop, bool printVerbose /* = false */) { assert(optLoopTable != nullptr); assert((&optLoopTable[0] <= loop) && (loop < &optLoopTable[optLoopCount])); unsigned lnum = (unsigned)(loop - optLoopTable); assert(lnum < optLoopCount); assert(&optLoopTable[lnum] == loop); if (loop->lpFlags & LPFLG_REMOVED) { // If a loop has been removed, it might be dangerous to print its fields (e.g., loop unrolling // nulls out the lpHead field). printf(FMT_LP " REMOVED", lnum); return; } printf(FMT_LP ", from " FMT_BB " to " FMT_BB " (Head=" FMT_BB ", Entry=" FMT_BB, lnum, loop->lpTop->bbNum, loop->lpBottom->bbNum, loop->lpHead->bbNum, loop->lpEntry->bbNum); if (loop->lpExitCnt == 1) { printf(", Exit=" FMT_BB, loop->lpExit->bbNum); } else { printf(", ExitCnt=%d", loop->lpExitCnt); } if (loop->lpParent != BasicBlock::NOT_IN_LOOP) { printf(", parent=" FMT_LP, loop->lpParent); } printf(")"); if (printVerbose) { if (loop->lpChild != BasicBlock::NOT_IN_LOOP) { printf(", child loop = " FMT_LP, loop->lpChild); } if (loop->lpSibling != BasicBlock::NOT_IN_LOOP) { printf(", sibling loop = " FMT_LP, loop->lpSibling); } // If an iterator loop print the iterator and the initialization. 
if (loop->lpFlags & LPFLG_ITER) { printf(" [over V%02u", loop->lpIterVar()); printf(" ("); printf(GenTree::OpName(loop->lpIterOper())); printf(" %d)", loop->lpIterConst()); if (loop->lpFlags & LPFLG_CONST_INIT) { printf(" from %d", loop->lpConstInit); } if (loop->lpFlags & LPFLG_VAR_INIT) { printf(" from V%02u", loop->lpVarInit); } if (loop->lpFlags & (LPFLG_CONST_INIT | LPFLG_VAR_INIT)) { if (loop->lpInitBlock != loop->lpHead) { printf(" (in " FMT_BB ")", loop->lpInitBlock->bbNum); } } // If a simple test condition print operator and the limits */ printf(" %s", GenTree::OpName(loop->lpTestOper())); if (loop->lpFlags & LPFLG_CONST_LIMIT) { printf(" %d", loop->lpConstLimit()); if (loop->lpFlags & LPFLG_SIMD_LIMIT) { printf(" (simd)"); } } if (loop->lpFlags & LPFLG_VAR_LIMIT) { printf(" V%02u", loop->lpVarLimit()); } if (loop->lpFlags & LPFLG_ARRLEN_LIMIT) { ArrIndex* index = new (getAllocator(CMK_DebugOnly)) ArrIndex(getAllocator(CMK_DebugOnly)); if (loop->lpArrLenLimit(this, index)) { printf(" "); index->Print(); printf(".Length"); } else { printf(" ???.Length"); } } printf("]"); } // Print the flags if (loop->lpFlags & LPFLG_CONTAINS_CALL) { printf(" call"); } if (loop->lpFlags & LPFLG_HAS_PREHEAD) { printf(" prehead"); } if (loop->lpFlags & LPFLG_DONT_UNROLL) { printf(" !unroll"); } if (loop->lpFlags & LPFLG_ASGVARS_YES) { printf(" avyes"); } if (loop->lpFlags & LPFLG_ASGVARS_INC) { printf(" avinc"); } } } void Compiler::optPrintLoopInfo(unsigned lnum, bool printVerbose /* = false */) { assert(lnum < optLoopCount); const LoopDsc& loop = optLoopTable[lnum]; optPrintLoopInfo(&loop, printVerbose); } //------------------------------------------------------------------------ // optPrintLoopTable: Print the loop table // void Compiler::optPrintLoopTable() { printf("\n*************** Natural loop table\n"); if (optLoopCount == 0) { printf("No loops\n"); } else { for (unsigned loopInd = 0; loopInd < optLoopCount; loopInd++) { optPrintLoopInfo(loopInd, /* verbose */ true); printf("\n"); } } printf("\n"); } #endif // DEBUG //------------------------------------------------------------------------ // optPopulateInitInfo: Populate loop init info in the loop table. // // Arguments: // loopInd - loop index // initBlock - block in which the initialization lives. // init - the tree that is supposed to initialize the loop iterator. // iterVar - loop iteration variable. // // Return Value: // "false" if the loop table could not be populated with the loop iterVar init info. // // Operation: // The 'init' tree is checked if its lhs is a local and rhs is either // a const or a local. // bool Compiler::optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenTree* init, unsigned iterVar) { // Operator should be = if (init->gtOper != GT_ASG) { return false; } GenTree* lhs = init->AsOp()->gtOp1; GenTree* rhs = init->AsOp()->gtOp2; // LHS has to be local and should equal iterVar. if (lhs->gtOper != GT_LCL_VAR || lhs->AsLclVarCommon()->GetLclNum() != iterVar) { return false; } // RHS can be constant or local var. // TODO-CQ: CLONE: Add arr length for descending loops. 
if (rhs->gtOper == GT_CNS_INT && rhs->TypeGet() == TYP_INT) { optLoopTable[loopInd].lpFlags |= LPFLG_CONST_INIT; optLoopTable[loopInd].lpConstInit = (int)rhs->AsIntCon()->gtIconVal; optLoopTable[loopInd].lpInitBlock = initBlock; } else if (rhs->gtOper == GT_LCL_VAR) { optLoopTable[loopInd].lpFlags |= LPFLG_VAR_INIT; optLoopTable[loopInd].lpVarInit = rhs->AsLclVarCommon()->GetLclNum(); optLoopTable[loopInd].lpInitBlock = initBlock; } else { return false; } return true; } //---------------------------------------------------------------------------------- // optCheckIterInLoopTest: Check if iter var is used in loop test. // // Arguments: // test "jtrue" tree or an asg of the loop iter termination condition // from/to blocks (beg, end) which are part of the loop. // iterVar loop iteration variable. // loopInd loop index. // // Operation: // The test tree is parsed to check if "iterVar" matches the lhs of the condition // and the rhs limit is extracted from the "test" tree. The limit information is // added to the loop table. // // Return Value: // "false" if the loop table could not be populated with the loop test info or // if the test condition doesn't involve iterVar. // bool Compiler::optCheckIterInLoopTest( unsigned loopInd, GenTree* test, BasicBlock* from, BasicBlock* to, unsigned iterVar) { // Obtain the relop from the "test" tree. GenTree* relop; if (test->gtOper == GT_JTRUE) { relop = test->gtGetOp1(); } else { assert(test->gtOper == GT_ASG); relop = test->gtGetOp2(); } noway_assert(relop->OperIsCompare()); GenTree* opr1 = relop->AsOp()->gtOp1; GenTree* opr2 = relop->AsOp()->gtOp2; GenTree* iterOp; GenTree* limitOp; // Make sure op1 or op2 is the iterVar. if (opr1->gtOper == GT_LCL_VAR && opr1->AsLclVarCommon()->GetLclNum() == iterVar) { iterOp = opr1; limitOp = opr2; } else if (opr2->gtOper == GT_LCL_VAR && opr2->AsLclVarCommon()->GetLclNum() == iterVar) { iterOp = opr2; limitOp = opr1; } else { return false; } if (iterOp->gtType != TYP_INT) { return false; } // Mark the iterator node. iterOp->gtFlags |= GTF_VAR_ITERATOR; // Check what type of limit we have - constant, variable or arr-len. if (limitOp->gtOper == GT_CNS_INT) { optLoopTable[loopInd].lpFlags |= LPFLG_CONST_LIMIT; if ((limitOp->gtFlags & GTF_ICON_SIMD_COUNT) != 0) { optLoopTable[loopInd].lpFlags |= LPFLG_SIMD_LIMIT; } } else if (limitOp->gtOper == GT_LCL_VAR && !optIsVarAssigned(from, to, nullptr, limitOp->AsLclVarCommon()->GetLclNum())) { optLoopTable[loopInd].lpFlags |= LPFLG_VAR_LIMIT; } else if (limitOp->gtOper == GT_ARR_LENGTH) { optLoopTable[loopInd].lpFlags |= LPFLG_ARRLEN_LIMIT; } else { return false; } // Save the type of the comparison between the iterator and the limit. optLoopTable[loopInd].lpTestTree = relop; return true; } //---------------------------------------------------------------------------------- // optIsLoopIncrTree: Check if the tree is a loop increment of the form v += 1 or v = v + 1 // // Arguments: // incr The incr tree to be checked: either an oper-equal (+=, -=, ...) node or // a "v = v op const" ASG node. // // Operation: // The incr tree is checked to be an update of a TYP_INT local by a constant int, // using one of the supported operators (ADD, SUB, MUL, RSH, LSH). // // Return Value: // iterVar local num if the iterVar is found, otherwise BAD_VAR_NUM.
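//    For example, both "i += 2" and "i = i + 2" (with "i" a TYP_INT local and 2 a constant int)
//    return the local number of "i", while "i += j" returns BAD_VAR_NUM because the increment
//    amount is not a constant.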
// unsigned Compiler::optIsLoopIncrTree(GenTree* incr) { GenTree* incrVal; genTreeOps updateOper; unsigned iterVar = incr->IsLclVarUpdateTree(&incrVal, &updateOper); if (iterVar != BAD_VAR_NUM) { // We have v = v op y type asg node. switch (updateOper) { case GT_ADD: case GT_SUB: case GT_MUL: case GT_RSH: case GT_LSH: break; default: return BAD_VAR_NUM; } // Increment should be by a const int. // TODO-CQ: CLONE: allow variable increments. if ((incrVal->gtOper != GT_CNS_INT) || (incrVal->TypeGet() != TYP_INT)) { return BAD_VAR_NUM; } } return iterVar; } //---------------------------------------------------------------------------------- // optComputeIterInfo: Check tree is loop increment of a lcl that is loop-invariant. // // Arguments: // from, to - are blocks (beg, end) which are part of the loop. // incr - tree that increments the loop iterator. v+=1 or v=v+1. // pIterVar - see return value. // // Return Value: // Returns true if iterVar "v" can be returned in "pIterVar", otherwise returns // false. // // Operation: // Check if the "incr" tree is a "v=v+1 or v+=1" type tree and make sure it is not // assigned in the loop. // bool Compiler::optComputeIterInfo(GenTree* incr, BasicBlock* from, BasicBlock* to, unsigned* pIterVar) { unsigned iterVar = optIsLoopIncrTree(incr); if (iterVar == BAD_VAR_NUM) { return false; } if (optIsVarAssigned(from, to, incr, iterVar)) { JITDUMP("iterVar is assigned in loop\n"); return false; } *pIterVar = iterVar; return true; } //---------------------------------------------------------------------------------- // optIsLoopTestEvalIntoTemp: // Pattern match if the test tree is computed into a tmp // and the "tmp" is used as jump condition for loop termination. // // Arguments: // testStmt - is the JTRUE statement that is of the form: jmpTrue (Vtmp != 0) // where Vtmp contains the actual loop test result. // newTestStmt - contains the statement that is the actual test stmt involving // the loop iterator. // // Return Value: // Returns true if a new test tree can be obtained. // // Operation: // Scan if the current stmt is a jtrue with (Vtmp != 0) as condition // Then returns the rhs for def of Vtmp as the "test" node. // // Note: // This method just retrieves what it thinks is the "test" node, // the callers are expected to verify that "iterVar" is used in the test. // bool Compiler::optIsLoopTestEvalIntoTemp(Statement* testStmt, Statement** newTestStmt) { GenTree* test = testStmt->GetRootNode(); if (test->gtOper != GT_JTRUE) { return false; } GenTree* relop = test->gtGetOp1(); noway_assert(relop->OperIsCompare()); GenTree* opr1 = relop->AsOp()->gtOp1; GenTree* opr2 = relop->AsOp()->gtOp2; // Make sure we have jtrue (vtmp != 0) if ((relop->OperGet() == GT_NE) && (opr1->OperGet() == GT_LCL_VAR) && (opr2->OperGet() == GT_CNS_INT) && opr2->IsIntegralConst(0)) { // Get the previous statement to get the def (rhs) of Vtmp to see // if the "test" is evaluated into Vtmp. Statement* prevStmt = testStmt->GetPrevStmt(); if (prevStmt == nullptr) { return false; } GenTree* tree = prevStmt->GetRootNode(); if (tree->OperGet() == GT_ASG) { GenTree* lhs = tree->AsOp()->gtOp1; GenTree* rhs = tree->AsOp()->gtOp2; // Return as the new test node. 
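            // The shape being matched here is, roughly:
            //     tmp = (i RELOP limit);   // prevStmt: ASG(tmp, relop)
            //     if (tmp != 0) ...        // testStmt: JTRUE(NE(tmp, 0))
            // so the relop on the rhs of the assignment to "tmp" is the actual loop test.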
if (lhs->gtOper == GT_LCL_VAR && lhs->AsLclVarCommon()->GetLclNum() == opr1->AsLclVarCommon()->GetLclNum()) { if (rhs->OperIsCompare()) { *newTestStmt = prevStmt; return true; } } } } return false; } //---------------------------------------------------------------------------------- // optExtractInitTestIncr: // Extract the "init", "test" and "incr" nodes of the loop. // // Arguments: // head - Loop head block // bottom - Loop bottom block // top - Loop top block // ppInit - The init stmt of the loop if found. // ppTest - The test stmt of the loop if found. // ppIncr - The incr stmt of the loop if found. // // Return Value: // The results are put in "ppInit", "ppTest" and "ppIncr" if the method // returns true. Returns false if the information can't be extracted. // // Operation: // Check if the "test" stmt is last stmt in the loop "bottom". If found good, // "test" stmt is found. Try to find the "incr" stmt. Check previous stmt of // "test" to get the "incr" stmt. If it is not found it could be a loop of the // below form. // // +-------<-----------------<-----------+ // | | // v | // BBinit(head) -> BBcond(top) -> BBLoopBody(bottom) ---^ // // Check if the "incr" tree is present in the loop "top" node as the last stmt. // Also check if the "test" tree is assigned to a tmp node and the tmp is used // in the jtrue condition. // // Note: // This method just retrieves what it thinks is the "test" node, // the callers are expected to verify that "iterVar" is used in the test. // bool Compiler::optExtractInitTestIncr( BasicBlock* head, BasicBlock* bottom, BasicBlock* top, GenTree** ppInit, GenTree** ppTest, GenTree** ppIncr) { assert(ppInit != nullptr); assert(ppTest != nullptr); assert(ppIncr != nullptr); // Check if last two statements in the loop body are the increment of the iterator // and the loop termination test. noway_assert(bottom->bbStmtList != nullptr); Statement* testStmt = bottom->lastStmt(); noway_assert(testStmt != nullptr && testStmt->GetNextStmt() == nullptr); Statement* newTestStmt; if (optIsLoopTestEvalIntoTemp(testStmt, &newTestStmt)) { testStmt = newTestStmt; } // Check if we have the incr stmt before the test stmt, if we don't, // check if incr is part of the loop "top". Statement* incrStmt = testStmt->GetPrevStmt(); if (incrStmt == nullptr || optIsLoopIncrTree(incrStmt->GetRootNode()) == BAD_VAR_NUM) { if (top == nullptr || top->bbStmtList == nullptr || top->bbStmtList->GetPrevStmt() == nullptr) { return false; } // If the prev stmt to loop test is not incr, then check if we have loop test evaluated into a tmp. Statement* toplastStmt = top->lastStmt(); if (optIsLoopIncrTree(toplastStmt->GetRootNode()) != BAD_VAR_NUM) { incrStmt = toplastStmt; } else { return false; } } assert(testStmt != incrStmt); // Find the last statement in the loop pre-header which we expect to be the initialization of // the loop iterator. Statement* phdrStmt = head->firstStmt(); if (phdrStmt == nullptr) { return false; } Statement* initStmt = phdrStmt->GetPrevStmt(); noway_assert(initStmt != nullptr && (initStmt->GetNextStmt() == nullptr)); // If it is a duplicated loop condition, skip it. if (initStmt->GetRootNode()->OperIs(GT_JTRUE)) { bool doGetPrev = true; #ifdef DEBUG if (opts.optRepeat) { // Previous optimization passes may have inserted compiler-generated // statements other than duplicated loop conditions. doGetPrev = (initStmt->GetPrevStmt() != nullptr); } else { // Must be a duplicated loop condition. 
noway_assert(initStmt->GetRootNode()->gtOper == GT_JTRUE); } #endif // DEBUG if (doGetPrev) { initStmt = initStmt->GetPrevStmt(); } noway_assert(initStmt != nullptr); } *ppInit = initStmt->GetRootNode(); *ppTest = testStmt->GetRootNode(); *ppIncr = incrStmt->GetRootNode(); return true; } /***************************************************************************** * * Record the loop in the loop table. Return true if successful, false if * out of entries in loop table. */ bool Compiler::optRecordLoop( BasicBlock* head, BasicBlock* top, BasicBlock* entry, BasicBlock* bottom, BasicBlock* exit, unsigned char exitCnt) { if (exitCnt == 1) { noway_assert(exit != nullptr); } // Record this loop in the table, if there's room. assert(optLoopCount <= BasicBlock::MAX_LOOP_NUM); if (optLoopCount == BasicBlock::MAX_LOOP_NUM) { #if COUNT_LOOPS loopOverflowThisMethod = true; #endif return false; } // Assumed preconditions on the loop we're adding. assert(top->bbNum <= entry->bbNum); assert(entry->bbNum <= bottom->bbNum); assert(head->bbNum < top->bbNum || head->bbNum > bottom->bbNum); unsigned char loopInd = optLoopCount; if (optLoopTable == nullptr) { assert(loopInd == 0); optLoopTable = getAllocator(CMK_LoopOpt).allocate<LoopDsc>(BasicBlock::MAX_LOOP_NUM); NewLoopEpoch(); } else { // If the new loop contains any existing ones, add it in the right place. for (unsigned char prevPlus1 = optLoopCount; prevPlus1 > 0; prevPlus1--) { unsigned char prev = prevPlus1 - 1; if (optLoopTable[prev].lpContainedBy(top, bottom)) { loopInd = prev; } } // Move up any loops if necessary. for (unsigned j = optLoopCount; j > loopInd; j--) { optLoopTable[j] = optLoopTable[j - 1]; } } #ifdef DEBUG for (unsigned i = loopInd + 1; i < optLoopCount; i++) { // The loop is well-formed. assert(optLoopTable[i].lpWellFormed()); // Check for disjoint. if (optLoopTable[i].lpDisjoint(top, bottom)) { continue; } // Otherwise, assert complete containment (of optLoopTable[i] in new loop). assert(optLoopTable[i].lpContainedBy(top, bottom)); } #endif // DEBUG optLoopTable[loopInd].lpHead = head; optLoopTable[loopInd].lpTop = top; optLoopTable[loopInd].lpBottom = bottom; optLoopTable[loopInd].lpEntry = entry; optLoopTable[loopInd].lpExit = exit; optLoopTable[loopInd].lpExitCnt = exitCnt; optLoopTable[loopInd].lpParent = BasicBlock::NOT_IN_LOOP; optLoopTable[loopInd].lpChild = BasicBlock::NOT_IN_LOOP; optLoopTable[loopInd].lpSibling = BasicBlock::NOT_IN_LOOP; optLoopTable[loopInd].lpAsgVars = AllVarSetOps::UninitVal(); optLoopTable[loopInd].lpFlags = LPFLG_EMPTY; // We haven't yet recorded any side effects. for (MemoryKind memoryKind : allMemoryKinds()) { optLoopTable[loopInd].lpLoopHasMemoryHavoc[memoryKind] = false; } optLoopTable[loopInd].lpFieldsModified = nullptr; optLoopTable[loopInd].lpArrayElemTypesModified = nullptr; // // Try to find loops that have an iterator (i.e. for-like loops) "for (init; test; incr){ ... }" // We have the following restrictions: // 1. The loop condition must be a simple one i.e. only one JTRUE node // 2. There must be a loop iterator (a local var) that is // incremented (decremented or lsh, rsh, mul) with a constant value // 3. The iterator is incremented exactly once // 4. The loop condition must use the iterator. 
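    // A typical match is a simple counted loop such as "for (int i = 0; i < n; i++) { ... }",
    // where "i" is a local incremented by a constant and compared against a constant, a
    // loop-invariant local, or an array length.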
// if (bottom->bbJumpKind == BBJ_COND) { GenTree* init; GenTree* test; GenTree* incr; if (!optExtractInitTestIncr(head, bottom, top, &init, &test, &incr)) { goto DONE_LOOP; } unsigned iterVar = BAD_VAR_NUM; if (!optComputeIterInfo(incr, head->bbNext, bottom, &iterVar)) { goto DONE_LOOP; } // Make sure the "iterVar" initialization is never skipped, // i.e. every pred of ENTRY other than HEAD is in the loop. for (BasicBlock* const predBlock : entry->PredBlocks()) { if ((predBlock != head) && !optLoopTable[loopInd].lpContains(predBlock)) { goto DONE_LOOP; } } if (!optPopulateInitInfo(loopInd, head, init, iterVar)) { goto DONE_LOOP; } // Check that the iterator is used in the loop condition. if (!optCheckIterInLoopTest(loopInd, test, head->bbNext, bottom, iterVar)) { goto DONE_LOOP; } // We know the loop has an iterator at this point ->flag it as LPFLG_ITER // Record the iterator, the pointer to the test node // and the initial value of the iterator (constant or local var) optLoopTable[loopInd].lpFlags |= LPFLG_ITER; // Record iterator. optLoopTable[loopInd].lpIterTree = incr; #if COUNT_LOOPS // Save the initial value of the iterator - can be lclVar or constant // Flag the loop accordingly. iterLoopCount++; #endif #if COUNT_LOOPS simpleTestLoopCount++; #endif #if COUNT_LOOPS // Check if a constant iteration loop. if ((optLoopTable[loopInd].lpFlags & LPFLG_CONST_INIT) && (optLoopTable[loopInd].lpFlags & LPFLG_CONST_LIMIT)) { // This is a constant loop. constIterLoopCount++; } #endif #ifdef DEBUG if (verbose && 0) { printf("\nConstant loop initializer:\n"); gtDispTree(init); printf("\nConstant loop body:\n"); BasicBlock* block = head; do { block = block->bbNext; for (Statement* const stmt : block->Statements()) { if (stmt->GetRootNode() == incr) { break; } printf("\n"); gtDispTree(stmt->GetRootNode()); } } while (block != bottom); } #endif // DEBUG } DONE_LOOP: bool loopInsertedAtEnd = (loopInd == optLoopCount); optLoopCount++; #ifdef DEBUG if (verbose) { printf("Recorded loop %s", loopInsertedAtEnd ? "" : "(extended) "); optPrintLoopInfo(loopInd, /* verbose */ true); printf("\n"); } #endif // DEBUG return true; } #ifdef DEBUG void Compiler::optCheckPreds() { for (BasicBlock* const block : Blocks()) { for (BasicBlock* const predBlock : block->PredBlocks()) { // make sure this pred is part of the BB list BasicBlock* bb; for (bb = fgFirstBB; bb; bb = bb->bbNext) { if (bb == predBlock) { break; } } noway_assert(bb); switch (bb->bbJumpKind) { case BBJ_COND: if (bb->bbJumpDest == block) { break; } FALLTHROUGH; case BBJ_NONE: noway_assert(bb->bbNext == block); break; case BBJ_EHFILTERRET: case BBJ_ALWAYS: case BBJ_EHCATCHRET: noway_assert(bb->bbJumpDest == block); break; default: break; } } } } #endif // DEBUG namespace { //------------------------------------------------------------------------ // LoopSearch: Class that handles scanning a range of blocks to detect a loop, // moving blocks to make the loop body contiguous, and recording the loop. // // We will use the following terminology: // HEAD - the basic block that flows into the loop ENTRY block (Currently MUST be lexically before entry). // Not part of the looping of the loop. // TOP - the target of the backward edge from BOTTOM, and the lexically first basic block (in bbNext order) // within this loop. // BOTTOM - the lexically last block in the loop (i.e. 
the block from which we jump to the top) // EXIT - the predecessor of loop's unique exit edge, if it has a unique exit edge; else nullptr // ENTRY - the entry in the loop (not necessarly the TOP), but there must be only one entry // // We (currently) require the body of a loop to be a contiguous (in bbNext order) sequence of basic blocks. // When the loop is identified, blocks will be moved out to make it a compact contiguous region if possible, // and in cases where compaction is not possible, we'll subsequently treat all blocks in the lexical range // between TOP and BOTTOM as part of the loop even if they aren't part of the SCC. // Regarding nesting: Since a given block can only have one back-edge (we only detect loops with back-edges // from BBJ_COND or BBJ_ALWAYS blocks), no two loops will share the same BOTTOM. Two loops may share the // same TOP/ENTRY as reported by LoopSearch, and optCanonicalizeLoopNest will subsequently re-write // the CFG so that no two loops share the same TOP/ENTRY anymore. // // | // v // head // | // | top <--+ // | | | // | ... | // | | | // | v | // +---> entry | // | | // ... | // | | // v | // +-- exit/tail | // | | | // | ... | // | | | // | v | // | bottom ---+ // | // +------+ // | // v // class LoopSearch { // Keeping track of which blocks are in the loop requires two block sets since we may add blocks // as we go but the BlockSet type's max ID doesn't increase to accommodate them. Define a helper // struct to make the ensuing code more readable. struct LoopBlockSet { private: // Keep track of blocks with bbNum <= oldBlockMaxNum in a regular BlockSet, since // it can hold all of them. BlockSet oldBlocksInLoop; // Blocks with bbNum <= oldBlockMaxNum // Keep track of blocks with bbNum > oldBlockMaxNum in a separate BlockSet, but // indexing them by (blockNum - oldBlockMaxNum); since we won't generate more than // one new block per old block, this must be sufficient to track any new blocks. BlockSet newBlocksInLoop; // Blocks with bbNum > oldBlockMaxNum Compiler* comp; unsigned int oldBlockMaxNum; public: LoopBlockSet(Compiler* comp) : oldBlocksInLoop(BlockSetOps::UninitVal()) , newBlocksInLoop(BlockSetOps::UninitVal()) , comp(comp) , oldBlockMaxNum(comp->fgBBNumMax) { } void Reset(unsigned int seedBlockNum) { if (BlockSetOps::MayBeUninit(oldBlocksInLoop)) { // Either the block sets are uninitialized (and long), so we need to initialize // them (and allocate their backing storage), or they are short and empty, so // assigning MakeEmpty to them is as cheap as ClearD. oldBlocksInLoop = BlockSetOps::MakeEmpty(comp); newBlocksInLoop = BlockSetOps::MakeEmpty(comp); } else { // We know the backing storage is already allocated, so just clear it. BlockSetOps::ClearD(comp, oldBlocksInLoop); BlockSetOps::ClearD(comp, newBlocksInLoop); } assert(seedBlockNum <= oldBlockMaxNum); BlockSetOps::AddElemD(comp, oldBlocksInLoop, seedBlockNum); } bool CanRepresent(unsigned int blockNum) { // We can represent old blocks up to oldBlockMaxNum, and // new blocks up to 2 * oldBlockMaxNum. 
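            // For example, with oldBlockMaxNum == 50, old block BB37 is tracked in oldBlocksInLoop
            // at bit 37, while a newly created BB63 is tracked in newBlocksInLoop at bit 63 - 50 = 13
            // (see IsMember/Insert below).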
return (blockNum <= 2 * oldBlockMaxNum); } bool IsMember(unsigned int blockNum) { if (blockNum > oldBlockMaxNum) { return BlockSetOps::IsMember(comp, newBlocksInLoop, blockNum - oldBlockMaxNum); } else { return BlockSetOps::IsMember(comp, oldBlocksInLoop, blockNum); } } void Insert(unsigned int blockNum) { if (blockNum > oldBlockMaxNum) { BlockSetOps::AddElemD(comp, newBlocksInLoop, blockNum - oldBlockMaxNum); } else { BlockSetOps::AddElemD(comp, oldBlocksInLoop, blockNum); } } bool TestAndInsert(unsigned int blockNum) { if (blockNum > oldBlockMaxNum) { unsigned int shiftedNum = blockNum - oldBlockMaxNum; if (!BlockSetOps::IsMember(comp, newBlocksInLoop, shiftedNum)) { BlockSetOps::AddElemD(comp, newBlocksInLoop, shiftedNum); return false; } } else { if (!BlockSetOps::IsMember(comp, oldBlocksInLoop, blockNum)) { BlockSetOps::AddElemD(comp, oldBlocksInLoop, blockNum); return false; } } return true; } }; LoopBlockSet loopBlocks; // Set of blocks identified as part of the loop Compiler* comp; // See LoopSearch class comment header for a diagram relating these fields: BasicBlock* head; // Predecessor of unique entry edge BasicBlock* top; // Successor of back-edge from BOTTOM BasicBlock* bottom; // Predecessor of back-edge to TOP, also lexically last in-loop block BasicBlock* entry; // Successor of unique entry edge BasicBlock* lastExit; // Most recently discovered exit block unsigned char exitCount; // Number of discovered exit edges unsigned int oldBlockMaxNum; // Used to identify new blocks created during compaction BlockSet bottomBlocks; // BOTTOM blocks of already-recorded loops #ifdef DEBUG bool forgotExit = false; // Flags a rare case where lastExit gets nulled out, for assertions #endif bool changedFlowGraph = false; // Signals that loop compaction has modified the flow graph public: LoopSearch(Compiler* comp) : loopBlocks(comp), comp(comp), oldBlockMaxNum(comp->fgBBNumMax), bottomBlocks(BlockSetOps::MakeEmpty(comp)) { // Make sure we've renumbered such that the bitsets can hold all the bits assert(comp->fgBBNumMax <= comp->fgCurBBEpochSize); } //------------------------------------------------------------------------ // RecordLoop: Notify the Compiler that a loop has been found. // // Return Value: // true - Loop successfully recorded. // false - Compiler has run out of loop descriptors; loop not recorded. // bool RecordLoop() { // At this point we have a compact loop - record it in the loop table. // If we found only one exit, record it in the table too // (otherwise an exit = nullptr in the loop table means multiple exits). BasicBlock* onlyExit = (exitCount == 1 ? lastExit : nullptr); if (comp->optRecordLoop(head, top, entry, bottom, onlyExit, exitCount)) { // Record the BOTTOM block for future reference before returning. assert(bottom->bbNum <= oldBlockMaxNum); BlockSetOps::AddElemD(comp, bottomBlocks, bottom->bbNum); return true; } // Unable to record this loop because the loop descriptor table overflowed. return false; } //------------------------------------------------------------------------ // ChangedFlowGraph: Determine whether loop compaction has modified the flow graph. // // Return Value: // true - The flow graph has been modified; fgUpdateChangedFlowGraph should // be called (which is the caller's responsibility). // false - The flow graph has not been modified by this LoopSearch. 
// bool ChangedFlowGraph() { return changedFlowGraph; } //------------------------------------------------------------------------ // FindLoop: Search for a loop with the given HEAD block and back-edge. // // Arguments: // head - Block to be the HEAD of any loop identified // top - Block to be the TOP of any loop identified // bottom - Block to be the BOTTOM of any loop identified // // Return Value: // true - Found a valid loop. // false - Did not find a valid loop. // // Notes: // May modify flow graph to make loop compact before returning. // Will set instance fields to track loop's extent and exits if a valid // loop is found, and potentially trash them otherwise. // bool FindLoop(BasicBlock* head, BasicBlock* top, BasicBlock* bottom) { // Is this a loop candidate? - We look for "back edges", i.e. an edge from BOTTOM // to TOP (note that this is an abuse of notation since this is not necessarily a back edge // as the definition says, but merely an indication that we have a loop there). // Thus, we have to be very careful and after entry discovery check that it is indeed // the only place we enter the loop (especially for non-reducible flow graphs). if (top->bbNum > bottom->bbNum) // is this a backward edge? (from BOTTOM to TOP) { // Edge from BOTTOM to TOP is not a backward edge return false; } if (bottom->bbNum > oldBlockMaxNum) { // Not a true back-edge; bottom is a block added to reconnect fall-through during // loop processing, so its block number does not reflect its position. return false; } if (bottom->KindIs(BBJ_EHFINALLYRET, BBJ_EHFILTERRET, BBJ_EHCATCHRET, BBJ_CALLFINALLY, BBJ_SWITCH)) { // BBJ_EHFINALLYRET, BBJ_EHFILTERRET, BBJ_EHCATCHRET, and BBJ_CALLFINALLY can never form a loop. // BBJ_SWITCH that has a backward jump appears only for labeled break. return false; } // The presence of a "back edge" is an indication that a loop might be present here. // // Definition: A loop is: // 1. A collection of STRONGLY CONNECTED nodes i.e. there is a path from any // node in the loop to any other node in the loop (wholly within the loop) // 2. The loop has a unique ENTRY, i.e. there is only one way to reach a node // in the loop from outside the loop, and that is through the ENTRY // Let's find the loop ENTRY BasicBlock* entry = FindEntry(head, top, bottom); if (entry == nullptr) { // For now, we only recognize loops where HEAD has some successor ENTRY in the loop. return false; } // Passed the basic checks; initialize instance state for this back-edge. this->head = head; this->top = top; this->entry = entry; this->bottom = bottom; this->lastExit = nullptr; this->exitCount = 0; if (!HasSingleEntryCycle()) { // There isn't actually a loop between TOP and BOTTOM return false; } if (!loopBlocks.IsMember(top->bbNum)) { // The "back-edge" we identified isn't actually part of the flow cycle containing ENTRY return false; } // Disqualify loops where the first block of the loop is less nested in EH than // the bottom block. That is, we don't want to handle loops where the back edge // goes from within an EH region to a first block that is outside that same EH // region. Note that we *do* handle loops where the first block is the *first* // block of a more nested EH region (since it is legal to branch to the first // block of an immediately more nested EH region). So, for example, disqualify // this: // // BB02 // ... // try { // ... // BB10 BBJ_COND => BB02 // ... // } // // Here, BB10 is more nested than BB02. 
if (bottom->hasTryIndex() && !comp->bbInTryRegions(bottom->getTryIndex(), top)) { JITDUMP("Loop 'top' " FMT_BB " is in an outer EH region compared to loop 'bottom' " FMT_BB ". Rejecting " "loop.\n", top->bbNum, bottom->bbNum); return false; } #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Disqualify loops where the first block of the loop is a finally target. // The main problem is when multiple loops share a 'top' block that is a finally // target and we canonicalize the loops by adding a new loop head. In that case, we // need to update the blocks so the finally target bit is moved to the newly created // block, and removed from the old 'top' block. This is 'hard', so it's easier to disallow // the loop than to update the flow graph to support this case. if ((top->bbFlags & BBF_FINALLY_TARGET) != 0) { JITDUMP("Loop 'top' " FMT_BB " is a finally target. Rejecting loop.\n", top->bbNum); return false; } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Compact the loop (sweep through it and move out any blocks that aren't part of the // flow cycle), and find the exits. if (!MakeCompactAndFindExits()) { // Unable to preserve well-formed loop during compaction. return false; } // We have a valid loop. return true; } //------------------------------------------------------------------------ // GetExitCount: Return the exit count computed for the loop // unsigned char GetExitCount() const { return exitCount; } private: //------------------------------------------------------------------------ // FindEntry: See if given HEAD flows to valid ENTRY between given TOP and BOTTOM // // Arguments: // head - Block to be the HEAD of any loop identified // top - Block to be the TOP of any loop identified // bottom - Block to be the BOTTOM of any loop identified // // Return Value: // Block to be the ENTRY of any loop identified, or nullptr if no // such entry meeting our criteria can be found. // // Notes: // Returns main entry if one is found, does not check for side-entries. // BasicBlock* FindEntry(BasicBlock* head, BasicBlock* top, BasicBlock* bottom) { if (head->bbJumpKind == BBJ_ALWAYS) { if (head->bbJumpDest->bbNum <= bottom->bbNum && head->bbJumpDest->bbNum >= top->bbNum) { // OK - we enter somewhere within the loop. // Cannot enter at the top - should have being caught by redundant jumps assert((head->bbJumpDest != top) || (head->bbFlags & BBF_KEEP_BBJ_ALWAYS)); return head->bbJumpDest; } else { // special case - don't consider now // assert (!"Loop entered in weird way!"); return nullptr; } } // Can we fall through into the loop? else if (head->KindIs(BBJ_NONE, BBJ_COND)) { // The ENTRY is at the TOP (a do-while loop) return top; } else { return nullptr; // HEAD does not flow into the loop; bail for now } } //------------------------------------------------------------------------ // HasSingleEntryCycle: Perform a reverse flow walk from ENTRY, visiting // only blocks between TOP and BOTTOM, to determine if such a cycle // exists and if it has a single entry. // // Return Value: // true - Found a single-entry cycle. // false - Did not find a single-entry cycle. // // Notes: // Will mark (in `loopBlocks`) all blocks found to participate in the cycle. // bool HasSingleEntryCycle() { // Now do a backwards flow walk from entry to see if we have a single-entry loop bool foundCycle = false; // Seed the loop block set and worklist with the entry block. 
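        // The walk below is a reverse flow (predecessor) walk restricted to the lexical range
        // [TOP..BOTTOM]: start at ENTRY, repeatedly pop a block from the worklist and visit its
        // preds, recording every block reached; HEAD is the only predecessor of ENTRY that is
        // allowed to be outside the loop.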
loopBlocks.Reset(entry->bbNum); jitstd::list<BasicBlock*> worklist(comp->getAllocator(CMK_LoopOpt)); worklist.push_back(entry); while (!worklist.empty()) { BasicBlock* block = worklist.back(); worklist.pop_back(); // Make sure ENTRY dominates all blocks in the loop. if (block->bbNum > oldBlockMaxNum) { // This is a new block we added to connect fall-through, so the // recorded dominator information doesn't cover it. Just continue, // and when we process its unique predecessor we'll abort if ENTRY // doesn't dominate that. } else if (!comp->fgDominate(entry, block)) { return false; } // Add preds to the worklist, checking for side-entries. for (BasicBlock* const predBlock : block->PredBlocks()) { unsigned int testNum = PositionNum(predBlock); if ((testNum < top->bbNum) || (testNum > bottom->bbNum)) { // Pred is out of loop range if (block == entry) { if (predBlock == head) { // This is the single entry we expect. continue; } // ENTRY has some pred other than head outside the loop. If ENTRY does not // dominate this pred, we'll consider this a side-entry and skip this loop; // otherwise the loop is still valid and this may be a (flow-wise) back-edge // of an outer loop. For the dominance test, if `predBlock` is a new block, use // its unique predecessor since the dominator tree has info for that. BasicBlock* effectivePred = (predBlock->bbNum > oldBlockMaxNum ? predBlock->bbPrev : predBlock); if (comp->fgDominate(entry, effectivePred)) { // Outer loop back-edge continue; } } // There are multiple entries to this loop, don't consider it. return false; } bool isFirstVisit; if (predBlock == entry) { // We have indeed found a cycle in the flow graph. isFirstVisit = !foundCycle; foundCycle = true; assert(loopBlocks.IsMember(predBlock->bbNum)); } else if (loopBlocks.TestAndInsert(predBlock->bbNum)) { // Already visited this pred isFirstVisit = false; } else { // Add this predBlock to the worklist worklist.push_back(predBlock); isFirstVisit = true; } if (isFirstVisit && (predBlock->bbNext != nullptr) && (PositionNum(predBlock->bbNext) == predBlock->bbNum)) { // We've created a new block immediately after `predBlock` to // reconnect what was fall-through. Mark it as in-loop also; // it needs to stay with `prev` and if it exits the loop we'd // just need to re-create it if we tried to move it out. loopBlocks.Insert(predBlock->bbNext->bbNum); } } } return foundCycle; } //------------------------------------------------------------------------ // PositionNum: Get the number identifying a block's position per the // lexical ordering that existed before searching for (and compacting) // loops. // // Arguments: // block - Block whose position is desired. // // Return Value: // A number indicating that block's position relative to others. // // Notes: // When the given block is a new one created during loop compaction, // the number of its unique predecessor is returned. // unsigned int PositionNum(BasicBlock* block) { if (block->bbNum > oldBlockMaxNum) { // This must be a block we inserted to connect fall-through after moving blocks. // To determine if it's in the loop or not, use the number of its unique predecessor // block. 
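            // The asserts below verify that such a block really does have exactly one predecessor,
            // namely its bbPrev, before that predecessor's number is used as the position.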
assert(block->bbPreds->getBlock() == block->bbPrev); assert(block->bbPreds->flNext == nullptr); return block->bbPrev->bbNum; } return block->bbNum; } //------------------------------------------------------------------------ // MakeCompactAndFindExits: Compact the loop (sweep through it and move out // any blocks that aren't part of the flow cycle), and find the exits (set // lastExit and exitCount). // // Return Value: // true - Loop successfully compacted (or `loopBlocks` expanded to // include all blocks in the lexical range), exits enumerated. // false - Loop cannot be made compact and remain well-formed. // bool MakeCompactAndFindExits() { // Compaction (if it needs to happen) will require an insertion point. BasicBlock* moveAfter = nullptr; for (BasicBlock* previous = top->bbPrev; previous != bottom;) { BasicBlock* block = previous->bbNext; if (loopBlocks.IsMember(block->bbNum)) { // This block is a member of the loop. Check to see if it may exit the loop. CheckForExit(block); // Done processing this block; move on to the next. previous = block; continue; } // This blocks is lexically between TOP and BOTTOM, but it does not // participate in the flow cycle. Check for a run of consecutive // such blocks. BasicBlock* lastNonLoopBlock = block; BasicBlock* nextLoopBlock = block->bbNext; while (!loopBlocks.IsMember(nextLoopBlock->bbNum)) { lastNonLoopBlock = nextLoopBlock; nextLoopBlock = nextLoopBlock->bbNext; // This loop must terminate because we know BOTTOM is in loopBlocks. } // Choose an insertion point for non-loop blocks if we haven't yet done so. if (moveAfter == nullptr) { moveAfter = FindInsertionPoint(); } if (!BasicBlock::sameEHRegion(previous, nextLoopBlock) || !BasicBlock::sameEHRegion(previous, moveAfter)) { // EH regions would be ill-formed if we moved these blocks out. // See if we can consider them loop blocks without introducing // a side-entry. if (CanTreatAsLoopBlocks(block, lastNonLoopBlock)) { // The call to `canTreatAsLoop` marked these blocks as part of the loop; // iterate without updating `previous` so that we'll analyze them as part // of the loop. continue; } else { // We can't move these out of the loop or leave them in, so just give // up on this loop. return false; } } // Now physically move the blocks. BasicBlock* moveBefore = moveAfter->bbNext; comp->fgUnlinkRange(block, lastNonLoopBlock); comp->fgMoveBlocksAfter(block, lastNonLoopBlock, moveAfter); comp->ehUpdateLastBlocks(moveAfter, lastNonLoopBlock); // Apply any adjustments needed for fallthrough at the boundaries of the moved region. FixupFallThrough(moveAfter, moveBefore, block); FixupFallThrough(lastNonLoopBlock, nextLoopBlock, moveBefore); // Also apply any adjustments needed where the blocks were snipped out of the loop. BasicBlock* newBlock = FixupFallThrough(previous, block, nextLoopBlock); if (newBlock != nullptr) { // This new block is in the loop and is a loop exit. loopBlocks.Insert(newBlock->bbNum); lastExit = newBlock; ++exitCount; } // Update moveAfter for the next insertion. moveAfter = lastNonLoopBlock; // Note that we've changed the flow graph, and continue without updating // `previous` so that we'll process nextLoopBlock. 
changedFlowGraph = true; } if ((exitCount == 1) && (lastExit == nullptr)) { // If we happen to have a loop with two exits, one of which goes to an // infinite loop that's lexically nested inside it, where the inner loop // can't be moved out, we can end up in this situation (because // CanTreatAsLoopBlocks will have decremented the count expecting to find // another exit later). Bump the exit count to 2, since downstream code // will not be prepared for null lastExit with exitCount of 1. assert(forgotExit); exitCount = 2; } // Loop compaction was successful return true; } //------------------------------------------------------------------------ // FindInsertionPoint: Find an appropriate spot to which blocks that are // lexically between TOP and BOTTOM but not part of the flow cycle // can be moved. // // Return Value: // Block after which to insert moved blocks. // BasicBlock* FindInsertionPoint() { // Find an insertion point for blocks we're going to move. Move them down // out of the loop, and if possible find a spot that won't break up fall-through. BasicBlock* moveAfter = bottom; while (moveAfter->bbFallsThrough()) { // Keep looking for a better insertion point if we can. BasicBlock* newMoveAfter = TryAdvanceInsertionPoint(moveAfter); if (newMoveAfter == nullptr) { // Ran out of candidate insertion points, so just split up the fall-through. return moveAfter; } moveAfter = newMoveAfter; } return moveAfter; } //------------------------------------------------------------------------ // TryAdvanceInsertionPoint: Find the next legal insertion point after // the given one, if one exists. // // Arguments: // oldMoveAfter - Prior insertion point; find the next after this. // // Return Value: // The next block after `oldMoveAfter` that is a legal insertion point // (i.e. blocks being swept out of the loop can be moved immediately // after it), if one exists, else nullptr. // BasicBlock* TryAdvanceInsertionPoint(BasicBlock* oldMoveAfter) { BasicBlock* newMoveAfter = oldMoveAfter->bbNext; if (!BasicBlock::sameEHRegion(oldMoveAfter, newMoveAfter)) { // Don't cross an EH region boundary. return nullptr; } if (newMoveAfter->KindIs(BBJ_ALWAYS, BBJ_COND)) { unsigned int destNum = newMoveAfter->bbJumpDest->bbNum; if ((destNum >= top->bbNum) && (destNum <= bottom->bbNum) && !loopBlocks.IsMember(destNum)) { // Reversing this branch out of block `newMoveAfter` could confuse this algorithm // (in particular, the edge would still be numerically backwards but no longer be // lexically backwards, so a lexical forward walk from TOP would not find BOTTOM), // so don't do that. // We're checking for BBJ_ALWAYS and BBJ_COND only here -- we don't need to // check for BBJ_SWITCH because we'd never consider it a loop back-edge. return nullptr; } } // Similarly check to see if advancing to `newMoveAfter` would reverse the lexical order // of an edge from the run of blocks being moved to `newMoveAfter` -- doing so would // introduce a new lexical back-edge, which could (maybe?) confuse the loop search // algorithm, and isn't desirable layout anyway. for (BasicBlock* const predBlock : newMoveAfter->PredBlocks()) { unsigned int predNum = predBlock->bbNum; if ((predNum >= top->bbNum) && (predNum <= bottom->bbNum) && !loopBlocks.IsMember(predNum)) { // Don't make this forward edge a backwards edge. 
return nullptr; } } if (IsRecordedBottom(newMoveAfter)) { // This is the BOTTOM of another loop; don't move any blocks past it, to avoid moving them // out of that loop (we should have already done so when processing that loop if it were legal). return nullptr; } // Advancing the insertion point is ok, except that we can't split up any CallFinally/BBJ_ALWAYS // pair, so if we've got such a pair recurse to see if we can move past the whole thing. return (newMoveAfter->isBBCallAlwaysPair() ? TryAdvanceInsertionPoint(newMoveAfter) : newMoveAfter); } //------------------------------------------------------------------------ // IsRecordedBottom: Determine if the given block is the BOTTOM of a previously // recorded loop. // // Arguments: // block - Block to check for BOTTOM-ness. // // Return Value: // true - The block was recorded as `bottom` of some earlier-processed loop. // false - No loops yet recorded have this block as their `bottom`. // bool IsRecordedBottom(BasicBlock* block) { if (block->bbNum > oldBlockMaxNum) { // This is a new block, which can't be an outer bottom block because we only allow old blocks // as BOTTOM. return false; } return BlockSetOps::IsMember(comp, bottomBlocks, block->bbNum); } //------------------------------------------------------------------------ // CanTreatAsLoopBlocks: If the given range of blocks can be treated as // loop blocks, add them to loopBlockSet and return true. Otherwise, // return false. // // Arguments: // firstNonLoopBlock - First block in the run to be subsumed. // lastNonLoopBlock - Last block in the run to be subsumed. // // Return Value: // true - The blocks from `firstNonLoopBlock` to `lastNonLoopBlock` were // successfully added to `loopBlocks`. // false - Treating the blocks from `firstNonLoopBlock` to `lastNonLoopBlock` // would not be legal (it would induce a side-entry). // // Notes: // `loopBlocks` may be modified even if `false` is returned. // `exitCount` and `lastExit` may be modified if this process identifies // in-loop edges that were previously counted as exits. // bool CanTreatAsLoopBlocks(BasicBlock* firstNonLoopBlock, BasicBlock* lastNonLoopBlock) { for (BasicBlock* const testBlock : comp->Blocks(firstNonLoopBlock, lastNonLoopBlock)) { for (BasicBlock* const testPred : testBlock->PredBlocks()) { unsigned int predPosNum = PositionNum(testPred); unsigned int firstNonLoopPosNum = PositionNum(firstNonLoopBlock); unsigned int lastNonLoopPosNum = PositionNum(lastNonLoopBlock); if (loopBlocks.IsMember(predPosNum) || ((predPosNum >= firstNonLoopPosNum) && (predPosNum <= lastNonLoopPosNum))) { // This pred is in the loop (or what will be the loop if we determine this // run of exit blocks doesn't include a side-entry). if (predPosNum < firstNonLoopPosNum) { // We've already counted this block as an exit, so decrement the count. --exitCount; if (lastExit == testPred) { // Erase this now-bogus `lastExit` entry. lastExit = nullptr; INDEBUG(forgotExit = true); } } } else { // This pred is not in the loop, so this constitutes a side-entry. return false; } } // Either we're going to abort the loop on a subsequent testBlock, or this // testBlock is part of the loop. loopBlocks.Insert(testBlock->bbNum); } // All blocks were ok to leave in the loop. return true; } //------------------------------------------------------------------------ // FixupFallThrough: Re-establish any broken control flow connectivity // and eliminate any "goto-next"s that were created by changing the // given block's lexical follower.
// // Arguments: // block - Block whose `bbNext` has changed. // oldNext - Previous value of `block->bbNext`. // newNext - New value of `block->bbNext`. // // Return Value: // If a new block is created to reconnect flow, the new block is // returned; otherwise, nullptr. // BasicBlock* FixupFallThrough(BasicBlock* block, BasicBlock* oldNext, BasicBlock* newNext) { // If we create a new block, that will be our return value. BasicBlock* newBlock = nullptr; if (block->bbFallsThrough()) { // Need to reconnect the flow from `block` to `oldNext`. if ((block->bbJumpKind == BBJ_COND) && (block->bbJumpDest == newNext)) { // Reverse the jump condition GenTree* test = block->lastNode(); noway_assert(test->OperIsConditionalJump()); if (test->OperGet() == GT_JTRUE) { GenTree* cond = comp->gtReverseCond(test->AsOp()->gtOp1); assert(cond == test->AsOp()->gtOp1); // Ensure `gtReverseCond` did not create a new node. test->AsOp()->gtOp1 = cond; } else { comp->gtReverseCond(test); } // Redirect the Conditional JUMP to go to `oldNext` block->bbJumpDest = oldNext; } else { // Insert an unconditional jump to `oldNext` just after `block`. newBlock = comp->fgConnectFallThrough(block, oldNext); noway_assert((newBlock == nullptr) || loopBlocks.CanRepresent(newBlock->bbNum)); } } else if ((block->bbJumpKind == BBJ_ALWAYS) && (block->bbJumpDest == newNext)) { // We've made `block`'s jump target its bbNext, so remove the jump. if (!comp->fgOptimizeBranchToNext(block, newNext, block->bbPrev)) { // If optimizing away the goto-next failed for some reason, mark it KEEP_BBJ_ALWAYS to // prevent assertions from complaining about it. block->bbFlags |= BBF_KEEP_BBJ_ALWAYS; } } // Make sure we don't leave around a goto-next unless it's marked KEEP_BBJ_ALWAYS. assert(!block->KindIs(BBJ_COND, BBJ_ALWAYS) || (block->bbJumpDest != newNext) || ((block->bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0)); return newBlock; } //------------------------------------------------------------------------ // CheckForExit: Check if the given block has any successor edges that are // loop exits, and update `lastExit` and `exitCount` if so. // // Arguments: // block - Block whose successor edges are to be checked. // // Notes: // If one block has multiple exiting successor edges, those are counted // as multiple exits in `exitCount`. // void CheckForExit(BasicBlock* block) { BasicBlock* exitPoint; switch (block->bbJumpKind) { case BBJ_COND: case BBJ_CALLFINALLY: case BBJ_ALWAYS: case BBJ_EHCATCHRET: assert(block->bbJumpDest); exitPoint = block->bbJumpDest; if (!loopBlocks.IsMember(exitPoint->bbNum)) { // Exit from a block other than BOTTOM lastExit = block; exitCount++; } break; case BBJ_NONE: break; case BBJ_EHFINALLYRET: case BBJ_EHFILTERRET: // The "try" associated with this "finally" must be in the same loop, so the // finally block will return control inside the loop. break; case BBJ_THROW: case BBJ_RETURN: // Those are exits from the loop lastExit = block; exitCount++; break; case BBJ_SWITCH: for (BasicBlock* const exitPoint : block->SwitchTargets()) { if (!loopBlocks.IsMember(exitPoint->bbNum)) { lastExit = block; exitCount++; } } break; default: noway_assert(!"Unexpected bbJumpKind"); break; } if (block->bbFallsThrough() && !loopBlocks.IsMember(block->bbNext->bbNum)) { // Found a fall-through exit. lastExit = block; exitCount++; } } }; } // end (anonymous) namespace //------------------------------------------------------------------------ // optFindNaturalLoops: Find the natural loops, using dominators. 
Note that the test for // a loop is slightly different from the standard one, because we have not done a depth // first reordering of the basic blocks. // // See LoopSearch class comment header for a description of the loops found. // // We will find and record a maximum of BasicBlock::MAX_LOOP_NUM loops (currently 64). // void Compiler::optFindNaturalLoops() { #ifdef DEBUG if (verbose) { printf("*************** In optFindNaturalLoops()\n"); } #endif // DEBUG noway_assert(fgDomsComputed); assert(fgHasLoops); #if COUNT_LOOPS hasMethodLoops = false; loopsThisMethod = 0; loopOverflowThisMethod = false; #endif LoopSearch search(this); for (BasicBlock* head = fgFirstBB; head->bbNext != nullptr; head = head->bbNext) { BasicBlock* top = head->bbNext; // Blocks that are rarely run have a zero bbWeight and should never be optimized here. if (top->bbWeight == BB_ZERO_WEIGHT) { continue; } for (BasicBlock* const predBlock : top->PredBlocks()) { if (search.FindLoop(head, top, predBlock)) { // Found a loop; record it and see if we've hit the limit. bool recordedLoop = search.RecordLoop(); (void)recordedLoop; // avoid unusued variable warnings in COUNT_LOOPS and !DEBUG #if COUNT_LOOPS if (!hasMethodLoops) { // Mark the method as containing natural loops totalLoopMethods++; hasMethodLoops = true; } // Increment total number of loops found totalLoopCount++; loopsThisMethod++; // Keep track of the number of exits loopExitCountTable.record(static_cast<unsigned>(search.GetExitCount())); // Note that we continue to look for loops even if // (optLoopCount == BasicBlock::MAX_LOOP_NUM), in contrast to the !COUNT_LOOPS code below. // This gives us a better count and stats. Hopefully it doesn't affect actual codegen. CLANG_FORMAT_COMMENT_ANCHOR; #else // COUNT_LOOPS assert(recordedLoop); if (optLoopCount == BasicBlock::MAX_LOOP_NUM) { // We won't be able to record any more loops, so stop looking. goto NO_MORE_LOOPS; } #endif // COUNT_LOOPS // Continue searching preds of `top` to see if any other are // back-edges (this can happen for nested loops). The iteration // is safe because the compaction we do only modifies predecessor // lists of blocks that gain or lose fall-through from their // `bbPrev`, but since the motion is from within the loop to below // it, we know we're not altering the relationship between `top` // and its `bbPrev`. } } } #if !COUNT_LOOPS NO_MORE_LOOPS: #endif // !COUNT_LOOPS #if COUNT_LOOPS loopCountTable.record(loopsThisMethod); if (maxLoopsPerMethod < loopsThisMethod) { maxLoopsPerMethod = loopsThisMethod; } if (loopOverflowThisMethod) { totalLoopOverflows++; } #endif // COUNT_LOOPS bool mod = search.ChangedFlowGraph(); if (mod) { // Need to renumber blocks now since loop canonicalization // depends on it; can defer the rest of fgUpdateChangedFlowGraph() // until after canonicalizing loops. Dominator information is // recorded in terms of block numbers, so flag it invalid. fgDomsComputed = false; fgRenumberBlocks(); } // Now the loop indices are stable. We can figure out parent/child relationships // (using table indices to name loops), and label blocks. 
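    // Because a containing loop is always recorded in the table before the loops it contains,
    // scanning backwards from "loopInd" finds the innermost enclosing loop first. For example,
    // with L00 containing L01 containing L02, L02 gets parent L01, and L02 becomes L01's child
    // with L01's previous child (if any) as L02's sibling.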
for (unsigned char loopInd = 1; loopInd < optLoopCount; loopInd++) { for (unsigned char possibleParent = loopInd; possibleParent > 0;) { possibleParent--; if (optLoopTable[possibleParent].lpContains(optLoopTable[loopInd])) { optLoopTable[loopInd].lpParent = possibleParent; optLoopTable[loopInd].lpSibling = optLoopTable[possibleParent].lpChild; optLoopTable[possibleParent].lpChild = loopInd; break; } } } // Now label the blocks with the innermost loop to which they belong. Since parents // precede children in the table, doing the labeling for each loop in order will achieve // this -- the innermost loop labeling will be done last. (Inner loop blocks will be // labeled multiple times before being correct at the end.) for (unsigned char loopInd = 0; loopInd < optLoopCount; loopInd++) { for (BasicBlock* const blk : optLoopTable[loopInd].LoopBlocks()) { blk->bbNatLoopNum = loopInd; } } // Make sure that loops are canonical: that every loop has a unique "top", by creating an empty "nop" // one, if necessary, for loops containing others that share a "top." for (unsigned char loopInd = 0; loopInd < optLoopCount; loopInd++) { // Traverse the outermost loops as entries into the loop nest; so skip non-outermost. if (optLoopTable[loopInd].lpParent != BasicBlock::NOT_IN_LOOP) { continue; } // Otherwise... if (optCanonicalizeLoopNest(loopInd)) { mod = true; } } if (mod) { constexpr bool computePreds = true; fgUpdateChangedFlowGraph(computePreds); } if (false /* pre-header stress */) { // Stress mode: aggressively create loop pre-header for every loop. for (unsigned loopInd = 0; loopInd < optLoopCount; loopInd++) { fgCreateLoopPreHeader(loopInd); } if (fgModified) { // The predecessors were maintained in fgCreateLoopPreHeader; don't rebuild them. constexpr bool computePreds = false; constexpr bool computeDoms = true; fgUpdateChangedFlowGraph(computePreds, computeDoms); } } #ifdef DEBUG if (verbose && (optLoopCount > 0)) { optPrintLoopTable(); } #endif // DEBUG } //------------------------------------------------------------------------ // optIdentifyLoopsForAlignment: Determine which loops should be considered for alignment. // // All innermost loops whose block weight meets a threshold are candidates for alignment. // The `first` block of the loop is marked with the BBF_LOOP_ALIGN flag to indicate this // (the loop table itself is not changed). // // Depends on the loop table, and on block weights being set. // void Compiler::optIdentifyLoopsForAlignment() { #if FEATURE_LOOP_ALIGN if (codeGen->ShouldAlignLoops()) { for (BasicBlock::loopNumber loopInd = 0; loopInd < optLoopCount; loopInd++) { // An innerloop candidate that might need alignment if (optLoopTable[loopInd].lpChild == BasicBlock::NOT_IN_LOOP) { BasicBlock* top = optLoopTable[loopInd].lpTop; weight_t topWeight = top->getBBWeight(this); if (topWeight >= (opts.compJitAlignLoopMinBlockWeight * BB_UNITY_WEIGHT)) { // Sometimes with JitOptRepeat > 1, we might end up finding the loops twice. In such // cases, make sure to count them just once. 
if (!top->isLoopAlign()) { loopAlignCandidates++; top->bbFlags |= BBF_LOOP_ALIGN; JITDUMP(FMT_LP " that starts at " FMT_BB " needs alignment, weight=" FMT_WT ".\n", loopInd, top->bbNum, top->getBBWeight(this)); } } else { JITDUMP("Skip alignment for " FMT_LP " that starts at " FMT_BB " weight=" FMT_WT ".\n", loopInd, top->bbNum, topWeight); } } } } #endif } //------------------------------------------------------------------------ // optRedirectBlock: Replace the branch successors of a block based on a block map. // // Updates the successors of `blk`: if `blk2` is a branch successor of `blk`, and there is a mapping // for `blk2->blk3` in `redirectMap`, change `blk` so that `blk3` is this branch successor. // // Note that fall-through successors are not modified, including predecessor lists. // // Arguments: // blk - block to redirect // redirectMap - block->block map specifying how the `blk` target will be redirected. // updatePreds - if `true`, update the predecessor lists to match. // void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, const bool updatePreds) { BasicBlock* newJumpDest = nullptr; switch (blk->bbJumpKind) { case BBJ_NONE: case BBJ_THROW: case BBJ_RETURN: case BBJ_EHFILTERRET: case BBJ_EHFINALLYRET: case BBJ_EHCATCHRET: // These have no jump destination to update. break; case BBJ_ALWAYS: case BBJ_LEAVE: case BBJ_CALLFINALLY: case BBJ_COND: // All of these have a single jump destination to update. if (redirectMap->Lookup(blk->bbJumpDest, &newJumpDest)) { if (updatePreds) { fgRemoveRefPred(blk->bbJumpDest, blk); fgAddRefPred(newJumpDest, blk); } blk->bbJumpDest = newJumpDest; } break; case BBJ_SWITCH: { bool redirected = false; for (unsigned i = 0; i < blk->bbJumpSwt->bbsCount; i++) { BasicBlock* switchDest = blk->bbJumpSwt->bbsDstTab[i]; if (redirectMap->Lookup(switchDest, &newJumpDest)) { if (updatePreds) { fgRemoveRefPred(switchDest, blk); fgAddRefPred(newJumpDest, blk); } blk->bbJumpSwt->bbsDstTab[i] = newJumpDest; redirected = true; } } // If any redirections happened, invalidate the switch table map for the switch. if (redirected) { // Don't create a new map just to try to remove an entry. BlockToSwitchDescMap* switchMap = GetSwitchDescMap(/* createIfNull */ false); if (switchMap != nullptr) { switchMap->Remove(blk); } } } break; default: unreached(); } } // TODO-Cleanup: This should be a static member of the BasicBlock class. void Compiler::optCopyBlkDest(BasicBlock* from, BasicBlock* to) { assert(from->bbJumpKind == to->bbJumpKind); // Precondition. // copy the jump destination(s) from "from" to "to". switch (to->bbJumpKind) { case BBJ_ALWAYS: case BBJ_LEAVE: case BBJ_CALLFINALLY: case BBJ_COND: // All of these have a single jump destination to update. to->bbJumpDest = from->bbJumpDest; break; case BBJ_SWITCH: to->bbJumpSwt = new (this, CMK_BasicBlock) BBswtDesc(this, from->bbJumpSwt); break; default: break; } } // Returns true if 'block' is an entry block for any loop in 'optLoopTable' bool Compiler::optIsLoopEntry(BasicBlock* block) const { for (unsigned char loopInd = 0; loopInd < optLoopCount; loopInd++) { if ((optLoopTable[loopInd].lpFlags & LPFLG_REMOVED) != 0) { continue; } if (optLoopTable[loopInd].lpEntry == block) { return true; } } return false; } // Canonicalize the loop nest rooted at parent loop 'loopInd'. // Returns 'true' if the flow graph is modified. bool Compiler::optCanonicalizeLoopNest(unsigned char loopInd) { bool modified = false; // Is the top of the current loop in any nested loop? 
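    // bbNatLoopNum holds the innermost loop a block belongs to, so if "top" is labeled with a
    // loop number other than "loopInd", this loop shares its top with a more deeply nested loop
    // and needs a new, unique top block.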
if (optLoopTable[loopInd].lpTop->bbNatLoopNum != loopInd) { if (optCanonicalizeLoop(loopInd)) { modified = true; } } for (unsigned char child = optLoopTable[loopInd].lpChild; // child != BasicBlock::NOT_IN_LOOP; // child = optLoopTable[child].lpSibling) { if (optCanonicalizeLoopNest(child)) { modified = true; } } return modified; } bool Compiler::optCanonicalizeLoop(unsigned char loopInd) { // Is the top uniquely part of the current loop? BasicBlock* t = optLoopTable[loopInd].lpTop; if (t->bbNatLoopNum == loopInd) { return false; } JITDUMP("in optCanonicalizeLoop: " FMT_LP " has top " FMT_BB " (bottom " FMT_BB ") with natural loop number " FMT_LP ": need to canonicalize\n", loopInd, t->bbNum, optLoopTable[loopInd].lpBottom->bbNum, t->bbNatLoopNum); // Otherwise, the top of this loop is also part of a nested loop. // // Insert a new unique top for this loop. We must be careful to put this new // block in the correct EH region. Note that t->bbPrev might be in a different // EH region. For example: // // try { // ... // BB07 // } // BB08 // "first" // // In this case, first->bbPrev is BB07, which is in a different 'try' region. // On the other hand, the first block of multiple loops might be the first // block of a 'try' region that is completely contained in the multiple loops. // for example: // // BB08 try { } // ... // BB10 BBJ_ALWAYS => BB08 // ... // BB12 BBJ_ALWAYS => BB08 // // Here, we have two loops, both with BB08 as the "first" block. Block BB08 // is a single-block "try" region. Neither loop "bottom" block is in the same // "try" region as BB08. This is legal because you can jump to the first block // of a try region. With EH normalization, no two "try" regions will share // this block. In this case, we need to insert a new block for the outer loop // in the same EH region as the branch from the "bottom": // // BB30 BBJ_NONE // BB08 try { } // ... // BB10 BBJ_ALWAYS => BB08 // ... // BB12 BBJ_ALWAYS => BB30 // // Another possibility is that the "first" block of the loop nest can be the first block // of a "try" region that also has other predecessors than those in the loop, or even in // the "try" region (since blocks can target the first block of a "try" region). For example: // // BB08 try { // ... // BB10 BBJ_ALWAYS => BB08 // ... // BB12 BBJ_ALWAYS => BB08 // BB13 } // ... // BB20 BBJ_ALWAYS => BB08 // ... // BB25 BBJ_ALWAYS => BB08 // // Here, BB08 has 4 flow graph predecessors: BB10, BB12, BB20, BB25. These are all potential loop // bottoms, for four possible nested loops. However, we require all the loop bottoms to be in the // same EH region. For loops BB08..BB10 and BB08..BB12, we need to add a new "top" block within // the try region, immediately before BB08. The bottom of the loop BB08..BB10 loop will target the // old BB08, and the bottom of the BB08..BB12 loop will target the new loop header. The other branches // (BB20, BB25) must target the new loop header, both for correctness, and to avoid the illegal // situation of branching to a non-first block of a 'try' region. // // We can also have a loop nest where the "first" block is outside of a "try" region // and the back edges are inside a "try" region, for example: // // BB02 // "first" // ... // BB09 try { BBJ_COND => BB02 // ... // BB15 BBJ_COND => BB02 // ... // BB21 } // end of "try" // // In this case, both loop back edges were formed by "leave" instructions that were // imported into branches that were later made conditional. 
In this case, we don't // want to copy the EH region of the back edge, since that would create a block // outside of and disjoint with the "try" region of the back edge. However, to // simplify things, we disqualify this type of loop, so we should never see this here. BasicBlock* h = optLoopTable[loopInd].lpHead; BasicBlock* b = optLoopTable[loopInd].lpBottom; // The loop must be entirely contained within a single handler region. assert(BasicBlock::sameHndRegion(t, b)); // If the bottom block is in the same "try" region, then we extend the EH // region. Otherwise, we add the new block outside the "try" region. const bool extendRegion = BasicBlock::sameTryRegion(t, b); BasicBlock* newT = fgNewBBbefore(BBJ_NONE, t, extendRegion); if (!extendRegion) { // We need to set the EH region manually. Set it to be the same // as the bottom block. newT->copyEHRegion(b); } // The new block can reach the same set of blocks as the old one, but don't try to reflect // that in its reachability set here -- creating the new block may have changed the BlockSet // representation from short to long, and canonicalizing loops is immediately followed by // a call to fgUpdateChangedFlowGraph which will recompute the reachability sets anyway. // Redirect the "bottom" of the current loop to "newT". BlockToBlockMap* blockMap = new (getAllocator(CMK_LoopOpt)) BlockToBlockMap(getAllocator(CMK_LoopOpt)); blockMap->Set(t, newT); optRedirectBlock(b, blockMap); // Redirect non-loop preds of "t" to also go to "newT". Inner loops that also branch to "t" should continue // to do so. However, there may be other predecessors from outside the loop nest that need to be updated // to point to "newT". This normally wouldn't happen, since they too would be part of the loop nest. However, // they might have been prevented from participating in the loop nest due to different EH nesting, or some // other reason. // // Note that optRedirectBlock doesn't update the predecessors list. So, if the same 't' block is processed // multiple times while canonicalizing multiple loop nests, we'll attempt to redirect a predecessor multiple times. // This is ok, because after the first redirection, the topPredBlock branch target will no longer match the source // edge of the blockMap, so nothing will happen. bool firstPred = true; for (BasicBlock* const topPredBlock : t->PredBlocks()) { // Skip if topPredBlock is in the loop. // Note that this uses block number to detect membership in the loop. We are adding blocks during // canonicalization, and those block numbers will be new, and larger than previous blocks. However, we work // outside-in, so we shouldn't encounter the new blocks at the loop boundaries, or in the predecessor lists. if (t->bbNum <= topPredBlock->bbNum && topPredBlock->bbNum <= b->bbNum) { JITDUMP("in optCanonicalizeLoop: 'top' predecessor " FMT_BB " is in the range of " FMT_LP " (" FMT_BB ".."
FMT_BB "); not redirecting its bottom edge\n", topPredBlock->bbNum, loopInd, t->bbNum, b->bbNum); continue; } JITDUMP("in optCanonicalizeLoop: redirect top predecessor " FMT_BB " to " FMT_BB "\n", topPredBlock->bbNum, newT->bbNum); optRedirectBlock(topPredBlock, blockMap); // When we have profile data then the 'newT' block will inherit topPredBlock profile weight if (topPredBlock->hasProfileWeight()) { // This corrects an issue when the topPredBlock has a profile based weight // if (firstPred) { JITDUMP("in optCanonicalizeLoop: block " FMT_BB " will inheritWeight from " FMT_BB "\n", newT->bbNum, topPredBlock->bbNum); newT->inheritWeight(topPredBlock); firstPred = false; } else { JITDUMP("in optCanonicalizeLoop: block " FMT_BB " will also contribute to the weight of " FMT_BB "\n", newT->bbNum, topPredBlock->bbNum); weight_t newWeight = newT->getBBWeight(this) + topPredBlock->getBBWeight(this); newT->setBBProfileWeight(newWeight); } } } assert(newT->bbNext == t); // If it had been a do-while loop (top == entry), update entry, as well. BasicBlock* origE = optLoopTable[loopInd].lpEntry; if (optLoopTable[loopInd].lpTop == origE) { optLoopTable[loopInd].lpEntry = newT; } optLoopTable[loopInd].lpTop = newT; newT->bbNatLoopNum = loopInd; JITDUMP("in optCanonicalizeLoop: made new block " FMT_BB " [%p] the new unique top of loop %d.\n", newT->bbNum, dspPtr(newT), loopInd); // Make sure the head block still goes to the entry... if (h->bbJumpKind == BBJ_NONE && h->bbNext != optLoopTable[loopInd].lpEntry) { h->bbJumpKind = BBJ_ALWAYS; h->bbJumpDest = optLoopTable[loopInd].lpEntry; } else if (h->bbJumpKind == BBJ_COND && h->bbNext == newT && newT != optLoopTable[loopInd].lpEntry) { BasicBlock* h2 = fgNewBBafter(BBJ_ALWAYS, h, /*extendRegion*/ true); optLoopTable[loopInd].lpHead = h2; h2->bbJumpDest = optLoopTable[loopInd].lpEntry; h2->bbStmtList = nullptr; fgInsertStmtAtEnd(h2, fgNewStmtFromTree(gtNewOperNode(GT_NOP, TYP_VOID, nullptr))); } // If any loops nested in "loopInd" have the same head and entry as "loopInd", // it must be the case that they were do-while's (since "h" fell through to the entry). // The new node "newT" becomes the head of such loops. for (unsigned char childLoop = optLoopTable[loopInd].lpChild; // childLoop != BasicBlock::NOT_IN_LOOP; // childLoop = optLoopTable[childLoop].lpSibling) { if (optLoopTable[childLoop].lpEntry == origE && optLoopTable[childLoop].lpHead == h && newT->bbJumpKind == BBJ_NONE && newT->bbNext == origE) { optUpdateLoopHead(childLoop, h, newT); } } return true; } //----------------------------------------------------------------------------- // optLoopContains: Check if one loop contains another // // Arguments: // l1 -- loop num of containing loop (must be valid loop num) // l2 -- loop num of contained loop (valid loop num, or NOT_IN_LOOP) // // Returns: // True if loop described by l2 is contained within l1. // // Notes: // A loop contains itself. 
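//   Containment is determined by following l2's lpParent chain until it reaches l1 or runs out (NOT_IN_LOOP).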
// bool Compiler::optLoopContains(unsigned l1, unsigned l2) const { assert(l1 < optLoopCount); assert((l2 < optLoopCount) || (l2 == BasicBlock::NOT_IN_LOOP)); if (l1 == l2) { return true; } else if (l2 == BasicBlock::NOT_IN_LOOP) { return false; } else { return optLoopContains(l1, optLoopTable[l2].lpParent); } } void Compiler::optUpdateLoopHead(unsigned loopInd, BasicBlock* from, BasicBlock* to) { assert(optLoopTable[loopInd].lpHead == from); optLoopTable[loopInd].lpHead = to; for (unsigned char childLoop = optLoopTable[loopInd].lpChild; childLoop != BasicBlock::NOT_IN_LOOP; childLoop = optLoopTable[childLoop].lpSibling) { if (optLoopTable[childLoop].lpHead == from) { optUpdateLoopHead(childLoop, from, to); } } } //----------------------------------------------------------------------------- // optIterSmallOverflow: Helper for loop unrolling. Determine if "i += const" will // cause an overflow exception for the small types. // // Arguments: // iterAtExit - iteration constant at loop exit // incrType - type of increment // // Returns: // true if overflow // // static bool Compiler::optIterSmallOverflow(int iterAtExit, var_types incrType) { int type_MAX; switch (incrType) { case TYP_BYTE: type_MAX = SCHAR_MAX; break; case TYP_UBYTE: type_MAX = UCHAR_MAX; break; case TYP_SHORT: type_MAX = SHRT_MAX; break; case TYP_USHORT: type_MAX = USHRT_MAX; break; case TYP_UINT: // Detected by checking for 32bit .... case TYP_INT: return false; // ... overflow same as done for TYP_INT default: NO_WAY("Bad type"); } if (iterAtExit > type_MAX) { return true; } else { return false; } } //----------------------------------------------------------------------------- // optIterSmallUnderflow: Helper for loop unrolling. Determine if "i -= const" will // cause an underflow exception for the small types. // // Arguments: // iterAtExit - iteration constant at loop exit // decrType - type of decrement // // Returns: // true if overflow // // static bool Compiler::optIterSmallUnderflow(int iterAtExit, var_types decrType) { int type_MIN; switch (decrType) { case TYP_BYTE: type_MIN = SCHAR_MIN; break; case TYP_SHORT: type_MIN = SHRT_MIN; break; case TYP_UBYTE: type_MIN = 0; break; case TYP_USHORT: type_MIN = 0; break; case TYP_UINT: // Detected by checking for 32bit .... case TYP_INT: return false; // ... underflow same as done for TYP_INT default: NO_WAY("Bad type"); } if (iterAtExit < type_MIN) { return true; } else { return false; } } //----------------------------------------------------------------------------- // optComputeLoopRep: Helper for loop unrolling. Computes the number of repetitions // in a constant loop. // // Arguments: // constInit - loop constant initial value // constLimit - loop constant limit // iterInc - loop iteration increment // iterOper - loop iteration increment operator (ADD, SUB, etc.) // iterOperType - iteration operator type // testOper - type of loop test (i.e. GT_LE, GT_GE, etc.) 
// unsTest - true if test is unsigned // dupCond - true if the loop head contains a test which skips this loop // iterCount - *iterCount is set to the iteration count, if the function returns `true` // // Returns: // true if the loop has a constant repetition count, false if that cannot be proven // bool Compiler::optComputeLoopRep(int constInit, int constLimit, int iterInc, genTreeOps iterOper, var_types iterOperType, genTreeOps testOper, bool unsTest, bool dupCond, unsigned* iterCount) { noway_assert(genActualType(iterOperType) == TYP_INT); __int64 constInitX; __int64 constLimitX; unsigned loopCount; int iterSign; // Using this, we can just do a signed comparison with other 32 bit values. if (unsTest) { constLimitX = (unsigned int)constLimit; } else { constLimitX = (signed int)constLimit; } switch (iterOperType) { // For small types, the iteration operator will narrow these values if big #define INIT_ITER_BY_TYPE(type) \ constInitX = (type)constInit; \ iterInc = (type)iterInc; case TYP_BYTE: INIT_ITER_BY_TYPE(signed char); break; case TYP_UBYTE: INIT_ITER_BY_TYPE(unsigned char); break; case TYP_SHORT: INIT_ITER_BY_TYPE(signed short); break; case TYP_USHORT: INIT_ITER_BY_TYPE(unsigned short); break; // For the big types, 32 bit arithmetic is performed case TYP_INT: case TYP_UINT: if (unsTest) { constInitX = (unsigned int)constInit; } else { constInitX = (signed int)constInit; } break; default: noway_assert(!"Bad type"); NO_WAY("Bad type"); } // If iterInc is zero we have an infinite loop. if (iterInc == 0) { return false; } // Set iterSign to +1 for positive iterInc and -1 for negative iterInc. iterSign = (iterInc > 0) ? +1 : -1; // Initialize loopCount to zero. loopCount = 0; // If dupCond is true then the loop head contains a test which skips // this loop, if the constInit does not pass the loop test. // Such a loop can execute zero times. // If dupCond is false then we have a true do-while loop which we // always execute the loop once before performing the loop test if (!dupCond) { loopCount += 1; constInitX += iterInc; } // bail if count is based on wrap-around math if (iterInc > 0) { if (constLimitX < constInitX) { return false; } } else if (constLimitX > constInitX) { return false; } // Compute the number of repetitions. switch (testOper) { __int64 iterAtExitX; case GT_EQ: // Something like "for (i=init; i == lim; i++)" doesn't make any sense. return false; case GT_NE: // Consider: "for (i = init; i != lim; i += const)" // This is tricky since it may have a constant number of iterations or loop forever. // We have to compute "(lim - init) mod iterInc" to see if it is zero. // If "mod iterInc" is not zero then the limit test will miss and a wrap will occur // which is probably not what the end user wanted, but it is legal. if (iterInc > 0) { // Stepping by one, i.e. Mod with 1 is always zero. if (iterInc != 1) { if (((constLimitX - constInitX) % iterInc) != 0) { return false; } } } else { noway_assert(iterInc < 0); // Stepping by -1, i.e. Mod with 1 is always zero. 
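// For instance (illustrative values only): constInit=10, constLimit=3, iterInc=-2 gives (10 - 3) % 2 == 1,
// so the iterator would step past the limit without ever equaling it, and we conservatively return false.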
if (iterInc != -1) { if (((constInitX - constLimitX) % (-iterInc)) != 0) { return false; } } } switch (iterOper) { case GT_SUB: iterInc = -iterInc; FALLTHROUGH; case GT_ADD: if (constInitX != constLimitX) { loopCount += (unsigned)((constLimitX - constInitX - iterSign) / iterInc) + 1; } iterAtExitX = (int)(constInitX + iterInc * (int)loopCount); if (unsTest) { iterAtExitX = (unsigned)iterAtExitX; } // Check if iteration incr will cause overflow for small types if (optIterSmallOverflow((int)iterAtExitX, iterOperType)) { return false; } // iterator with 32bit overflow. Bad for TYP_(U)INT if (iterAtExitX < constLimitX) { return false; } *iterCount = loopCount; return true; case GT_MUL: case GT_DIV: case GT_RSH: case GT_LSH: case GT_UDIV: return false; default: noway_assert(!"Unknown operator for loop iterator"); return false; } case GT_LT: switch (iterOper) { case GT_SUB: iterInc = -iterInc; FALLTHROUGH; case GT_ADD: if (constInitX < constLimitX) { loopCount += (unsigned)((constLimitX - constInitX - iterSign) / iterInc) + 1; } iterAtExitX = (int)(constInitX + iterInc * (int)loopCount); if (unsTest) { iterAtExitX = (unsigned)iterAtExitX; } // Check if iteration incr will cause overflow for small types if (optIterSmallOverflow((int)iterAtExitX, iterOperType)) { return false; } // iterator with 32bit overflow. Bad for TYP_(U)INT if (iterAtExitX < constLimitX) { return false; } *iterCount = loopCount; return true; case GT_MUL: case GT_DIV: case GT_RSH: case GT_LSH: case GT_UDIV: return false; default: noway_assert(!"Unknown operator for loop iterator"); return false; } case GT_LE: switch (iterOper) { case GT_SUB: iterInc = -iterInc; FALLTHROUGH; case GT_ADD: if (constInitX <= constLimitX) { loopCount += (unsigned)((constLimitX - constInitX) / iterInc) + 1; } iterAtExitX = (int)(constInitX + iterInc * (int)loopCount); if (unsTest) { iterAtExitX = (unsigned)iterAtExitX; } // Check if iteration incr will cause overflow for small types if (optIterSmallOverflow((int)iterAtExitX, iterOperType)) { return false; } // iterator with 32bit overflow. Bad for TYP_(U)INT if (iterAtExitX <= constLimitX) { return false; } *iterCount = loopCount; return true; case GT_MUL: case GT_DIV: case GT_RSH: case GT_LSH: case GT_UDIV: return false; default: noway_assert(!"Unknown operator for loop iterator"); return false; } case GT_GT: switch (iterOper) { case GT_SUB: iterInc = -iterInc; FALLTHROUGH; case GT_ADD: if (constInitX > constLimitX) { loopCount += (unsigned)((constLimitX - constInitX - iterSign) / iterInc) + 1; } iterAtExitX = (int)(constInitX + iterInc * (int)loopCount); if (unsTest) { iterAtExitX = (unsigned)iterAtExitX; } // Check if small types will underflow if (optIterSmallUnderflow((int)iterAtExitX, iterOperType)) { return false; } // iterator with 32bit underflow. Bad for TYP_INT and unsigneds if (iterAtExitX > constLimitX) { return false; } *iterCount = loopCount; return true; case GT_MUL: case GT_DIV: case GT_RSH: case GT_LSH: case GT_UDIV: return false; default: noway_assert(!"Unknown operator for loop iterator"); return false; } case GT_GE: switch (iterOper) { case GT_SUB: iterInc = -iterInc; FALLTHROUGH; case GT_ADD: if (constInitX >= constLimitX) { loopCount += (unsigned)((constLimitX - constInitX) / iterInc) + 1; } iterAtExitX = (int)(constInitX + iterInc * (int)loopCount); if (unsTest) { iterAtExitX = (unsigned)iterAtExitX; } // Check if small types will underflow if (optIterSmallUnderflow((int)iterAtExitX, iterOperType)) { return false; } // iterator with 32bit underflow. 
Bad for TYP_INT and unsigneds if (iterAtExitX >= constLimitX) { return false; } *iterCount = loopCount; return true; case GT_MUL: case GT_DIV: case GT_RSH: case GT_LSH: case GT_UDIV: return false; default: noway_assert(!"Unknown operator for loop iterator"); return false; } default: noway_assert(!"Unknown operator for loop condition"); } return false; } #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif //----------------------------------------------------------------------------- // optUnrollLoops: Look for loop unrolling candidates and unroll them. // // Loops must be of the form: // for (i=icon; i<icon; i++) { ... } // // Loops handled are fully unrolled; there is no partial unrolling. // // Limitations: only the following loop types are handled: // 1. "while" loops (top entry) // 2. constant initializer, constant bound // 3. The entire loop must be in the same EH region. // 4. The loop iteration variable can't be address exposed. // 5. The loop iteration variable can't be a promoted struct field. // 6. We must be able to calculate the total constant iteration count. // 7. On x86, there is a limit to the number of return blocks. So if there are return blocks in the loop that // would be unrolled, the unrolled code can't exceed that limit. // // Cost heuristics: // 1. there are cost metrics for maximum number of allowed iterations, and maximum unroll size // 2. single-iteration loops are always allowed (to eliminate the loop structure). // 3. otherwise, only loops where the limit is Vector<T>.Length are currently allowed // // In stress modes, these heuristic limits are expanded, and loops aren't required to have the // Vector<T>.Length limit. // // Loops are processed from innermost to outermost order, to attempt to unroll the most nested loops first. // // Returns: // suitable phase status // PhaseStatus Compiler::optUnrollLoops() { if (compCodeOpt() == SMALL_CODE) { return PhaseStatus::MODIFIED_NOTHING; } if (optLoopCount == 0) { return PhaseStatus::MODIFIED_NOTHING; } #ifdef DEBUG if (JitConfig.JitNoUnroll()) { return PhaseStatus::MODIFIED_NOTHING; } #endif #ifdef DEBUG if (verbose) { printf("*************** In optUnrollLoops()\n"); } #endif /* Look for loop unrolling candidates */ bool change = false; bool anyNestedLoopsUnrolled = false; INDEBUG(int unrollCount = 0); // count of loops unrolled INDEBUG(int unrollFailures = 0); // count of loops attempted to be unrolled, but failed static const unsigned ITER_LIMIT[COUNT_OPT_CODE + 1] = { 10, // BLENDED_CODE 0, // SMALL_CODE 20, // FAST_CODE 0 // COUNT_OPT_CODE }; assert(ITER_LIMIT[SMALL_CODE] == 0); assert(ITER_LIMIT[COUNT_OPT_CODE] == 0); unsigned iterLimit = ITER_LIMIT[compCodeOpt()]; #ifdef DEBUG if (compStressCompile(STRESS_UNROLL_LOOPS, 50)) { iterLimit *= 10; } #endif static const int UNROLL_LIMIT_SZ[COUNT_OPT_CODE + 1] = { 300, // BLENDED_CODE 0, // SMALL_CODE 600, // FAST_CODE 0 // COUNT_OPT_CODE }; assert(UNROLL_LIMIT_SZ[SMALL_CODE] == 0); assert(UNROLL_LIMIT_SZ[COUNT_OPT_CODE] == 0); // Visit loops from highest to lowest number to visit them in innermost to outermost order. for (unsigned lnum = optLoopCount - 1; lnum != ~0U; --lnum) { // This is necessary due to an apparent analysis limitation since // optLoopCount must be strictly greater than 0 upon entry and lnum // cannot wrap due to the loop termination condition. 
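// (lnum is unsigned, so it is the 'lnum != ~0U' test that stops the loop once the decrement wraps past zero;
// inside the body lnum is always a valid loop table index.)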
PREFAST_ASSUME(lnum != 0U - 1); LoopDsc& loop = optLoopTable[lnum]; BasicBlock* head; BasicBlock* top; BasicBlock* bottom; BasicBlock* initBlock; bool dupCond; // Does the 'head' block contain a duplicate loop condition (zero trip test)? int lbeg; // initial value for iterator int llim; // limit value for iterator unsigned lvar; // iterator lclVar # int iterInc; // value to increment the iterator genTreeOps iterOper; // type of iterator increment (i.e. ADD, SUB, etc.) var_types iterOperType; // type result of the oper (for overflow instrs) genTreeOps testOper; // type of loop test (i.e. GT_LE, GT_GE, etc.) bool unsTest; // Is the comparison unsigned? unsigned loopRetCount; // number of BBJ_RETURN blocks in loop unsigned totalIter; // total number of iterations in the constant loop const unsigned loopFlags = loop.lpFlags; // Check for required flags: // LPFLG_CONST_INIT - required because this transform only handles full unrolls // LPFLG_CONST_LIMIT - required because this transform only handles full unrolls const unsigned requiredFlags = LPFLG_CONST_INIT | LPFLG_CONST_LIMIT; if ((loopFlags & requiredFlags) != requiredFlags) { // Don't print to the JitDump about this common case. continue; } // Ignore if removed or marked as not unrollable. if (loopFlags & (LPFLG_DONT_UNROLL | LPFLG_REMOVED)) { // Don't print to the JitDump about this common case. continue; } // This transform only handles loops of this form if (!loop.lpIsTopEntry()) { JITDUMP("Failed to unroll loop " FMT_LP ": not top entry\n", lnum); continue; } head = loop.lpHead; noway_assert(head != nullptr); top = loop.lpTop; noway_assert(top != nullptr); bottom = loop.lpBottom; noway_assert(bottom != nullptr); // Get the loop data: // - initial constant // - limit constant // - iterator // - iterator increment // - increment operation type (i.e. ADD, SUB, etc...) // - loop test type (i.e. GT_GE, GT_LT, etc...) initBlock = loop.lpInitBlock; lbeg = loop.lpConstInit; llim = loop.lpConstLimit(); testOper = loop.lpTestOper(); lvar = loop.lpIterVar(); iterInc = loop.lpIterConst(); iterOper = loop.lpIterOper(); iterOperType = loop.lpIterOperType(); unsTest = (loop.lpTestTree->gtFlags & GTF_UNSIGNED) != 0; if (lvaTable[lvar].IsAddressExposed()) { // If the loop iteration variable is address-exposed then bail JITDUMP("Failed to unroll loop " FMT_LP ": V%02u is address exposed\n", lnum, lvar); continue; } if (lvaTable[lvar].lvIsStructField) { // If the loop iteration variable is a promoted field from a struct then bail JITDUMP("Failed to unroll loop " FMT_LP ": V%02u is a promoted struct field\n", lnum, lvar); continue; } // Locate/initialize the increment/test statements. Statement* initStmt = initBlock->lastStmt(); noway_assert((initStmt != nullptr) && (initStmt->GetNextStmt() == nullptr)); Statement* testStmt = bottom->lastStmt(); noway_assert((testStmt != nullptr) && (testStmt->GetNextStmt() == nullptr)); Statement* incrStmt = testStmt->GetPrevStmt(); noway_assert(incrStmt != nullptr); if (initStmt->GetRootNode()->OperIs(GT_JTRUE)) { // Must be a duplicated loop condition. dupCond = true; initStmt = initStmt->GetPrevStmt(); noway_assert(initStmt != nullptr); } else { dupCond = false; } // Find the number of iterations - the function returns false if not a constant number. 
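// For example (hypothetical values, not taken from any particular method): a loop equivalent to
// 'for (int i = 0; i < 8; i++)' has lbeg=0, llim=8, iterInc=1, iterOper=GT_ADD, testOper=GT_LT,
// and optComputeLoopRep yields totalIter=8.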
if (!optComputeLoopRep(lbeg, llim, iterInc, iterOper, iterOperType, testOper, unsTest, dupCond, &totalIter)) { JITDUMP("Failed to unroll loop " FMT_LP ": not a constant iteration count\n", lnum); continue; } // Forget it if there are too many repetitions or not a constant loop. if (totalIter > iterLimit) { JITDUMP("Failed to unroll loop " FMT_LP ": too many iterations (%d > %d) (heuristic)\n", lnum, totalIter, iterLimit); continue; } int unrollLimitSz = UNROLL_LIMIT_SZ[compCodeOpt()]; if (INDEBUG(compStressCompile(STRESS_UNROLL_LOOPS, 50) ||) false) { // In stress mode, quadruple the size limit, and drop // the restriction that loop limit must be vector element count. unrollLimitSz *= 4; } else if (totalIter <= 1) { // No limit for single iteration loops // If there is no iteration (totalIter == 0), we will remove the loop body entirely. unrollLimitSz = INT_MAX; } else if (!(loopFlags & LPFLG_SIMD_LIMIT)) { // Otherwise unroll only if limit is Vector_.Length // (as a heuristic, not for correctness/structural reasons) JITDUMP("Failed to unroll loop " FMT_LP ": constant limit isn't Vector<T>.Length (heuristic)\n", lnum); continue; } GenTree* incr = incrStmt->GetRootNode(); // Don't unroll loops we don't understand. if (incr->gtOper != GT_ASG) { JITDUMP("Failed to unroll loop " FMT_LP ": unknown increment op (%s)\n", lnum, GenTree::OpName(incr->gtOper)); continue; } incr = incr->AsOp()->gtOp2; GenTree* init = initStmt->GetRootNode(); // Make sure everything looks ok. // clang-format off if ((init->gtOper != GT_ASG) || (init->AsOp()->gtOp1->gtOper != GT_LCL_VAR) || (init->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum() != lvar) || (init->AsOp()->gtOp2->gtOper != GT_CNS_INT) || (init->AsOp()->gtOp2->AsIntCon()->gtIconVal != lbeg) || !((incr->gtOper == GT_ADD) || (incr->gtOper == GT_SUB)) || (incr->AsOp()->gtOp1->gtOper != GT_LCL_VAR) || (incr->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum() != lvar) || (incr->AsOp()->gtOp2->gtOper != GT_CNS_INT) || (incr->AsOp()->gtOp2->AsIntCon()->gtIconVal != iterInc) || (testStmt->GetRootNode()->gtOper != GT_JTRUE)) { noway_assert(!"Bad precondition in Compiler::optUnrollLoops()"); continue; } // clang-format on // Heuristic: Estimated cost in code size of the unrolled loop. { ClrSafeInt<unsigned> loopCostSz; // Cost is size of one iteration auto tryIndex = loop.lpTop->bbTryIndex; // Besides calculating the loop cost, also ensure that all loop blocks are within the same EH // region, and count the number of BBJ_RETURN blocks in the loop. loopRetCount = 0; for (BasicBlock* const block : loop.LoopBlocks()) { if (block->bbTryIndex != tryIndex) { // Unrolling would require cloning EH regions JITDUMP("Failed to unroll loop " FMT_LP ": EH constraint\n", lnum); goto DONE_LOOP; } if (block->bbJumpKind == BBJ_RETURN) { ++loopRetCount; } for (Statement* const stmt : block->Statements()) { gtSetStmtInfo(stmt); loopCostSz += stmt->GetCostSz(); } } #ifdef JIT32_GCENCODER if ((totalIter > 0) && (fgReturnCount + loopRetCount * (totalIter - 1) > SET_EPILOGCNT_MAX)) { // Jit32 GC encoder can't report more than SET_EPILOGCNT_MAX epilogs. JITDUMP("Failed to unroll loop " FMT_LP ": GC encoder max epilog constraint\n", lnum); goto DONE_LOOP; } #endif // !JIT32_GCENCODER // Compute the estimated increase in code size for the unrolled loop. ClrSafeInt<unsigned> fixedLoopCostSz(8); ClrSafeInt<int> unrollCostSz = ClrSafeInt<int>(loopCostSz * ClrSafeInt<unsigned>(totalIter)) - ClrSafeInt<int>(loopCostSz + fixedLoopCostSz); // Don't unroll if too much code duplication would result. 
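// For illustration only: with loopCostSz=40 and totalIter=4, unrollCostSz = 40*4 - (40+8) = 112,
// which is within the BLENDED_CODE limit of 300 defined above.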
if (unrollCostSz.IsOverflow() || (unrollCostSz.Value() > unrollLimitSz)) { JITDUMP("Failed to unroll loop " FMT_LP ": size constraint (%d > %d) (heuristic)\n", lnum, unrollCostSz.Value(), unrollLimitSz); goto DONE_LOOP; } // Looks like a good idea to unroll this loop, let's do it! CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (verbose) { printf("\nUnrolling loop "); optPrintLoopInfo(&loop); printf(" over V%02u from %u to %u unrollCostSz = %d\n\n", lvar, lbeg, llim, unrollCostSz); } #endif } #if FEATURE_LOOP_ALIGN for (BasicBlock* const block : loop.LoopBlocks()) { block->unmarkLoopAlign(this DEBUG_ARG("Unrolled loop")); } #endif // Create the unrolled loop statement list. { // When unrolling a loop, that loop disappears (and will be removed from the loop table). Each unrolled // block will be set to exist within the parent loop, if any. However, if we unroll a loop that has // nested loops, we will create multiple copies of the nested loops. This requires adding new loop table // entries to represent the new loops. Instead of trying to do this incrementally, in the case where // nested loops exist (in any unrolled loop) we rebuild the entire loop table after unrolling. BlockToBlockMap blockMap(getAllocator(CMK_LoopOpt)); BasicBlock* insertAfter = bottom; BasicBlock::loopNumber newLoopNum = loop.lpParent; bool anyNestedLoopsUnrolledThisLoop = false; int lval; unsigned iterToUnroll = totalIter; // The number of iterations left to unroll for (lval = lbeg; iterToUnroll > 0; iterToUnroll--) { // Note: we can't use the loop.LoopBlocks() iterator, as it captures loop.lpBottom->bbNext at the // beginning of iteration, and we insert blocks before that. So we need to evaluate lpBottom->bbNext // every iteration. for (BasicBlock* block = loop.lpTop; block != loop.lpBottom->bbNext; block = block->bbNext) { BasicBlock* newBlock = insertAfter = fgNewBBafter(block->bbJumpKind, insertAfter, /*extendRegion*/ true); blockMap.Set(block, newBlock, BlockToBlockMap::Overwrite); if (!BasicBlock::CloneBlockState(this, newBlock, block, lvar, lval)) { // CloneBlockState (specifically, gtCloneExpr) doesn't handle everything. If it fails // to clone a block in the loop, splice out and forget all the blocks we cloned so far: // put the loop blocks back to how they were before we started cloning blocks, // and abort unrolling the loop. BasicBlock* oldBottomNext = insertAfter->bbNext; bottom->bbNext = oldBottomNext; oldBottomNext->bbPrev = bottom; loop.lpFlags |= LPFLG_DONT_UNROLL; // Mark it so we don't try to unroll it again. INDEBUG(++unrollFailures); JITDUMP("Failed to unroll loop " FMT_LP ": block cloning failed on " FMT_BB "\n", lnum, block->bbNum); goto DONE_LOOP; } // All blocks in the unrolled loop will now be marked with the parent loop number. Note that // if the loop being unrolled contains nested (child) loops, we will notice this below (when // we set anyNestedLoopsUnrolledThisLoop), and that will cause us to rebuild the entire loop // table and all loop annotations on blocks. However, if the loop contains no nested loops, // setting the block `bbNatLoopNum` here is sufficient to incrementally update the block's // loop info. newBlock->bbNatLoopNum = newLoopNum; // Block weight should no longer have the loop multiplier // // Note this is not quite right, as we may not have upscaled by this amount // and we might not have upscaled at all, if we had profile data. // newBlock->scaleBBWeight(1.0 / BB_LOOP_WEIGHT_SCALE); // Jump dests are set in a post-pass; make sure CloneBlockState hasn't tried to set them. 
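// (The post-pass is the optCopyBlkDest / optRedirectBlock loop below, run once the whole iteration has been cloned.)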
assert(newBlock->bbJumpDest == nullptr); if (block == bottom) { // Remove the test; we're doing a full unroll. Statement* testCopyStmt = newBlock->lastStmt(); GenTree* testCopyExpr = testCopyStmt->GetRootNode(); assert(testCopyExpr->gtOper == GT_JTRUE); GenTree* sideEffList = nullptr; gtExtractSideEffList(testCopyExpr, &sideEffList, GTF_SIDE_EFFECT | GTF_ORDER_SIDEEFF); if (sideEffList == nullptr) { fgRemoveStmt(newBlock, testCopyStmt); } else { testCopyStmt->SetRootNode(sideEffList); } newBlock->bbJumpKind = BBJ_NONE; } } // Now redirect any branches within the newly-cloned iteration. // Don't include `bottom` in the iteration, since we've already changed the // newBlock->bbJumpKind, above. for (BasicBlock* block = loop.lpTop; block != loop.lpBottom; block = block->bbNext) { BasicBlock* newBlock = blockMap[block]; optCopyBlkDest(block, newBlock); optRedirectBlock(newBlock, &blockMap); } /* update the new value for the unrolled iterator */ switch (iterOper) { case GT_ADD: lval += iterInc; break; case GT_SUB: lval -= iterInc; break; case GT_RSH: case GT_LSH: noway_assert(!"Unrolling not implemented for this loop iterator"); goto DONE_LOOP; default: noway_assert(!"Unknown operator for constant loop iterator"); goto DONE_LOOP; } } // If we get here, we successfully cloned all the blocks in the unrolled loop. // Gut the old loop body for (BasicBlock* const block : loop.LoopBlocks()) { // Check if the old loop body had any nested loops that got cloned. Note that we need to do this // here, and not in the loop above, to handle the special case where totalIter is zero, and the // above loop doesn't execute. if (block->bbNatLoopNum != lnum) { anyNestedLoopsUnrolledThisLoop = true; } block->bbStmtList = nullptr; block->bbJumpKind = BBJ_NONE; block->bbFlags &= ~BBF_LOOP_HEAD; block->bbJumpDest = nullptr; block->bbNatLoopNum = newLoopNum; } if (anyNestedLoopsUnrolledThisLoop) { anyNestedLoopsUnrolled = true; } // If the HEAD is a BBJ_COND drop the condition (and make HEAD a BBJ_NONE block). if (head->bbJumpKind == BBJ_COND) { testStmt = head->lastStmt(); noway_assert(testStmt->GetRootNode()->gtOper == GT_JTRUE); fgRemoveStmt(head, testStmt); head->bbJumpKind = BBJ_NONE; } else { /* the loop must execute */ noway_assert(head->bbJumpKind == BBJ_NONE); } #ifdef DEBUG if (verbose) { printf("Whole unrolled loop:\n"); gtDispTree(initStmt->GetRootNode()); printf("\n"); fgDumpTrees(top, insertAfter); if (anyNestedLoopsUnrolledThisLoop) { printf("Unrolled loop " FMT_LP " contains nested loops\n", lnum); } } #endif // DEBUG // Update loop table. optMarkLoopRemoved(lnum); // Note if we created new BBJ_RETURNs (or removed some). if (totalIter > 0) { fgReturnCount += loopRetCount * (totalIter - 1); } else { assert(totalIter == 0); assert(fgReturnCount >= loopRetCount); fgReturnCount -= loopRetCount; } // Remember that something has changed. INDEBUG(++unrollCount); change = true; } DONE_LOOP:; } if (change) { #ifdef DEBUG if (verbose) { printf("\nFinished unrolling %d loops", unrollCount); if (unrollFailures > 0) { printf(", %d failures due to block cloning", unrollFailures); } printf("\n"); if (anyNestedLoopsUnrolled) { printf("At least one unrolled loop contains nested loops; recomputing loop table\n"); } } #endif // DEBUG // If we unrolled any nested loops, we rebuild the loop table (including recomputing the // return blocks list). 
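// Otherwise, only the predecessor lists and dominator sets are recomputed below.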
constexpr bool computePreds = true; constexpr bool computeDoms = true; const bool computeReturnBlocks = anyNestedLoopsUnrolled; const bool computeLoops = anyNestedLoopsUnrolled; fgUpdateChangedFlowGraph(computePreds, computeDoms, computeReturnBlocks, computeLoops); DBEXEC(verbose, fgDispBasicBlocks()); } else { #ifdef DEBUG assert(unrollCount == 0); assert(!anyNestedLoopsUnrolled); if (unrollFailures > 0) { printf("\nFinished loop unrolling, %d failures due to block cloning\n", unrollFailures); } #endif // DEBUG } #ifdef DEBUG fgDebugCheckBBlist(true); fgDebugCheckLoopTable(); #endif // DEBUG return PhaseStatus::MODIFIED_EVERYTHING; } #ifdef _PREFAST_ #pragma warning(pop) #endif /***************************************************************************** * * Return false if there is a code path from 'topBB' to 'botBB' that might * not execute a method call. */ bool Compiler::optReachWithoutCall(BasicBlock* topBB, BasicBlock* botBB) { // TODO-Cleanup: Currently BBF_GC_SAFE_POINT is not set for helper calls, // as some helper calls are neither interruptible nor hijackable. // When we can determine this, then we can set BBF_GC_SAFE_POINT for // those helpers too. noway_assert(topBB->bbNum <= botBB->bbNum); // We can always check topBB and botBB for any gc safe points and early out if ((topBB->bbFlags | botBB->bbFlags) & BBF_GC_SAFE_POINT) { return false; } // Otherwise we will need to rely upon the dominator sets if (!fgDomsComputed) { // return a conservative answer of true when we don't have the dominator sets return true; } BasicBlock* curBB = topBB; for (;;) { noway_assert(curBB); // If we added a loop pre-header block then we will // have a bbNum greater than fgLastBB, and we won't have // any dominator information about this block, so skip it. // if (curBB->bbNum <= fgLastBB->bbNum) { noway_assert(curBB->bbNum <= botBB->bbNum); // Does this block contain a gc safe point? if (curBB->bbFlags & BBF_GC_SAFE_POINT) { // Will this block always execute on the way to botBB ? // // Since we are checking every block in [topBB .. botBB] and we are using // a lexical definition of a loop. 
// (all that we know is that is that botBB is a back-edge to topBB) // Thus while walking blocks in this range we may encounter some blocks // that are not really part of the loop, and so we need to perform // some additional checks: // // We will check that the current 'curBB' is reachable from 'topBB' // and that it dominates the block containing the back-edge 'botBB' // When both of these are true then we know that the gcsafe point in 'curBB' // will be encountered in the loop and we can return false // if (fgDominate(curBB, botBB) && fgReachable(topBB, curBB)) { return false; } } else { // If we've reached the destination block, then we're done if (curBB == botBB) { break; } } } curBB = curBB->bbNext; } // If we didn't find any blocks that contained a gc safe point and // also met the fgDominate and fgReachable criteria then we must return true // return true; } // static Compiler::fgWalkResult Compiler::optInvertCountTreeInfo(GenTree** pTree, fgWalkData* data) { OptInvertCountTreeInfoType* o = (OptInvertCountTreeInfoType*)data->pCallbackData; if (Compiler::IsSharedStaticHelper(*pTree)) { o->sharedStaticHelperCount += 1; } if ((*pTree)->OperGet() == GT_ARR_LENGTH) { o->arrayLengthCount += 1; } return WALK_CONTINUE; } //----------------------------------------------------------------------------- // optInvertWhileLoop: modify flow and duplicate code so that for/while loops are // entered at top and tested at bottom (aka loop rotation or bottom testing). // Creates a "zero trip test" condition which guards entry to the loop. // Enables loop invariant hoisting and loop cloning, which depend on // `do {} while` format loops. Enables creation of a pre-header block after the // zero trip test to place code that only runs if the loop is guaranteed to // run at least once. // // Arguments: // block -- block that may be the predecessor of the un-rotated loop's test block. // // Returns: // true if any IR changes possibly made (used to determine phase return status) // // Notes: // Uses a simple lexical screen to detect likely loops. // // Specifically, we're looking for the following case: // // ... // jmp test // `block` argument // loop: // ... // ... // test: // ..stmts.. // cond // jtrue loop // // If we find this, and the condition is simple enough, we change // the loop to the following: // // ... // ..stmts.. // duplicated cond block statments // cond // duplicated cond // jfalse done // // else fall-through // loop: // ... // ... // test: // ..stmts.. // cond // jtrue loop // done: // // Makes no changes if the flow pattern match fails. // // May not modify a loop if profile is unfavorable, if the cost of duplicating // code is large (factoring in potential CSEs). // bool Compiler::optInvertWhileLoop(BasicBlock* block) { assert(opts.OptimizationEnabled()); assert(compCodeOpt() != SMALL_CODE); // Does the BB end with an unconditional jump? if (block->bbJumpKind != BBJ_ALWAYS || (block->bbFlags & BBF_KEEP_BBJ_ALWAYS)) { // It can't be one of the ones we use for our exception magic return false; } // Get hold of the jump target BasicBlock* bTest = block->bbJumpDest; // Does the block consist of 'jtrue(cond) block' ? 
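// That is, bTest must end in a conditional branch (BBJ_COND) whose backward target is block->bbNext,
// the prospective loop top.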
if (bTest->bbJumpKind != BBJ_COND) { return false; } // bTest must be a backwards jump to block->bbNext if (bTest->bbJumpDest != block->bbNext) { return false; } // Since test is a BBJ_COND it will have a bbNext noway_assert(bTest->bbNext != nullptr); // 'block' must be in the same try region as the condition, since we're going to insert a duplicated condition // in a new block after 'block', and the condition might include exception throwing code. // On non-funclet platforms (x86), the catch exit is a BBJ_ALWAYS, but we don't want that to // be considered as the head of a loop, so also disallow different handler regions. if (!BasicBlock::sameEHRegion(block, bTest)) { return false; } // The duplicated condition block will branch to bTest->bbNext, so that also better be in the // same try region (or no try region) to avoid generating illegal flow. BasicBlock* bTestNext = bTest->bbNext; if (bTestNext->hasTryIndex() && !BasicBlock::sameTryRegion(block, bTestNext)) { return false; } // It has to be a forward jump. Defer this check until after all the cheap checks // are done, since it iterates forward in the block list looking for bbJumpDest. // TODO-CQ: Check if we can also optimize the backwards jump as well. // if (!fgIsForwardBranch(block)) { return false; } // Find the loop termination test at the bottom of the loop. Statement* condStmt = bTest->lastStmt(); // Verify the test block ends with a conditional that we can manipulate. GenTree* const condTree = condStmt->GetRootNode(); noway_assert(condTree->gtOper == GT_JTRUE); if (!condTree->AsOp()->gtOp1->OperIsCompare()) { return false; } // Estimate the cost of cloning the entire test block. // // Note: it would help throughput to compute the maximum cost // first and early out for large bTest blocks, as we are doing two // tree walks per tree. But because of this helper call scan, the // maximum cost depends on the trees in the block. // // We might consider flagging blocks with hoistable helper calls // during importation, so we can avoid the helper search and // implement an early bail out for large blocks with no helper calls. // // Note that gtPrepareCost can cause operand swapping, so we must // return `true` (possible IR change) from here on. unsigned estDupCostSz = 0; for (Statement* const stmt : bTest->Statements()) { GenTree* tree = stmt->GetRootNode(); gtPrepareCost(tree); estDupCostSz += tree->GetCostSz(); } weight_t loopIterations = BB_LOOP_WEIGHT_SCALE; bool allProfileWeightsAreValid = false; weight_t const weightBlock = block->bbWeight; weight_t const weightTest = bTest->bbWeight; weight_t const weightNext = block->bbNext->bbWeight; // If we have profile data then we calculate the number of times // the loop will iterate into loopIterations if (fgIsUsingProfileWeights()) { // Only rely upon the profile weight when all three of these blocks // have good profile weights if (block->hasProfileWeight() && bTest->hasProfileWeight() && block->bbNext->hasProfileWeight()) { // If this while loop never iterates then don't bother transforming // if (weightNext == BB_ZERO_WEIGHT) { return true; } // We generally expect weightTest == weightNext + weightBlock. // // Tolerate small inconsistencies... 
// if (!fgProfileWeightsConsistent(weightBlock + weightNext, weightTest)) { JITDUMP("Profile weights locally inconsistent: block " FMT_WT ", next " FMT_WT ", test " FMT_WT "\n", weightBlock, weightNext, weightTest); } else { allProfileWeightsAreValid = true; // Determine iteration count // // weightNext is the number of time this loop iterates // weightBlock is the number of times that we enter the while loop // loopIterations is the average number of times that this loop iterates // loopIterations = weightNext / weightBlock; } } else { JITDUMP("Missing profile data for loop!\n"); } } unsigned maxDupCostSz = 34; if ((compCodeOpt() == FAST_CODE) || compStressCompile(STRESS_DO_WHILE_LOOPS, 30)) { maxDupCostSz *= 4; } // If this loop iterates a lot then raise the maxDupCost if (loopIterations >= 12.0) { maxDupCostSz *= 2; if (loopIterations >= 96.0) { maxDupCostSz *= 2; } } // If the compare has too high cost then we don't want to dup. bool costIsTooHigh = (estDupCostSz > maxDupCostSz); OptInvertCountTreeInfoType optInvertTotalInfo = {}; if (costIsTooHigh) { // If we already know that the cost is acceptable, then don't waste time walking the tree // counting things to boost the maximum allowed cost. // // If the loop condition has a shared static helper, we really want this loop converted // as not converting the loop will disable loop hoisting, meaning the shared helper will // be executed on every loop iteration. // // If the condition has array.Length operations, also boost, as they are likely to be CSE'd. for (Statement* const stmt : bTest->Statements()) { GenTree* tree = stmt->GetRootNode(); OptInvertCountTreeInfoType optInvertInfo = {}; fgWalkTreePre(&tree, Compiler::optInvertCountTreeInfo, &optInvertInfo); optInvertTotalInfo.sharedStaticHelperCount += optInvertInfo.sharedStaticHelperCount; optInvertTotalInfo.arrayLengthCount += optInvertInfo.arrayLengthCount; if ((optInvertInfo.sharedStaticHelperCount > 0) || (optInvertInfo.arrayLengthCount > 0)) { // Calculate a new maximum cost. We might be able to early exit. unsigned newMaxDupCostSz = maxDupCostSz + 24 * min(optInvertTotalInfo.sharedStaticHelperCount, (int)(loopIterations + 1.5)) + 8 * optInvertTotalInfo.arrayLengthCount; // Is the cost too high now? costIsTooHigh = (estDupCostSz > newMaxDupCostSz); if (!costIsTooHigh) { // No need counting any more trees; we're going to do the transformation. JITDUMP("Decided to duplicate loop condition block after counting helpers in tree [%06u] in " "block " FMT_BB, dspTreeID(tree), bTest->bbNum); maxDupCostSz = newMaxDupCostSz; // for the JitDump output below break; } } } } #ifdef DEBUG if (verbose) { // Note that `optInvertTotalInfo.sharedStaticHelperCount = 0` means either there were zero helpers, or the // tree walk to count them was not done. printf( "\nDuplication of loop condition [%06u] is %s, because the cost of duplication (%i) is %s than %i," "\n loopIterations = %7.3f, optInvertTotalInfo.sharedStaticHelperCount >= %d, validProfileWeights = %s\n", dspTreeID(condTree), costIsTooHigh ? "not done" : "performed", estDupCostSz, costIsTooHigh ? "greater" : "less or equal", maxDupCostSz, loopIterations, optInvertTotalInfo.sharedStaticHelperCount, dspBool(allProfileWeightsAreValid)); } #endif if (costIsTooHigh) { return true; } bool foundCondTree = false; // Create a new block after `block` to put the copied condition code. 
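// 'block' was a BBJ_ALWAYS to bTest; it now falls through into the cloned condition block created next.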
block->bbJumpKind = BBJ_NONE; block->bbJumpDest = nullptr; BasicBlock* bNewCond = fgNewBBafter(BBJ_COND, block, /*extendRegion*/ true); // Clone each statement in bTest and append to bNewCond. for (Statement* const stmt : bTest->Statements()) { GenTree* originalTree = stmt->GetRootNode(); GenTree* clonedTree = gtCloneExpr(originalTree); // Special case handling needed for the conditional jump tree if (originalTree == condTree) { foundCondTree = true; // Get the compare subtrees GenTree* originalCompareTree = originalTree->AsOp()->gtOp1; GenTree* clonedCompareTree = clonedTree->AsOp()->gtOp1; assert(originalCompareTree->OperIsCompare()); assert(clonedCompareTree->OperIsCompare()); // Flag compare and cloned copy so later we know this loop // has a proper zero trip test. originalCompareTree->gtFlags |= GTF_RELOP_ZTT; clonedCompareTree->gtFlags |= GTF_RELOP_ZTT; // The original test branches to remain in the loop. The // new cloned test will branch to avoid the loop. So the // cloned compare needs to reverse the branch condition. gtReverseCond(clonedCompareTree); } Statement* clonedStmt = fgNewStmtAtEnd(bNewCond, clonedTree); if (opts.compDbgInfo) { clonedStmt->SetDebugInfo(stmt->GetDebugInfo()); } } assert(foundCondTree); // Flag the block that received the copy as potentially having an array/vtable // reference, nullcheck, object/array allocation if the block copied from did; // this is a conservative guess. if (auto copyFlags = bTest->bbFlags & (BBF_HAS_IDX_LEN | BBF_HAS_NULLCHECK | BBF_HAS_NEWOBJ | BBF_HAS_NEWARRAY)) { bNewCond->bbFlags |= copyFlags; } bNewCond->bbJumpDest = bTest->bbNext; bNewCond->inheritWeight(block); // Update bbRefs and bbPreds for 'bNewCond', 'bNewCond->bbNext' 'bTest' and 'bTest->bbNext'. fgAddRefPred(bNewCond, block); fgAddRefPred(bNewCond->bbNext, bNewCond); fgRemoveRefPred(bTest, block); fgAddRefPred(bTest->bbNext, bNewCond); // Move all predecessor edges that look like loop entry edges to point to the new cloned condition // block, not the existing condition block. The idea is that if we only move `block` to point to // `bNewCond`, but leave other `bTest` predecessors still pointing to `bTest`, when we eventually // recognize loops, the loop will appear to have multiple entries, which will prevent optimization. // We don't have loops yet, but blocks should be in increasing lexical numbered order, so use that // as the proxy for predecessors that are "in" versus "out" of the potential loop. Note that correctness // is maintained no matter which condition block we point to, but we'll lose optimization potential // (and create spaghetti code) if we get it wrong. BlockToBlockMap blockMap(getAllocator(CMK_LoopOpt)); bool blockMapInitialized = false; unsigned loopFirstNum = bNewCond->bbNext->bbNum; unsigned loopBottomNum = bTest->bbNum; for (BasicBlock* const predBlock : bTest->PredBlocks()) { unsigned bNum = predBlock->bbNum; if ((loopFirstNum <= bNum) && (bNum <= loopBottomNum)) { // Looks like the predecessor is from within the potential loop; skip it. continue; } if (!blockMapInitialized) { blockMapInitialized = true; blockMap.Set(bTest, bNewCond); } // Redirect the predecessor to the new block. 
JITDUMP("Redirecting non-loop " FMT_BB " -> " FMT_BB " to " FMT_BB " -> " FMT_BB "\n", predBlock->bbNum, bTest->bbNum, predBlock->bbNum, bNewCond->bbNum); optRedirectBlock(predBlock, &blockMap, /*updatePreds*/ true); } // If we have profile data for all blocks and we know that we are cloning the // `bTest` block into `bNewCond` and thus changing the control flow from `block` so // that it no longer goes directly to `bTest` anymore, we have to adjust // various weights. // if (allProfileWeightsAreValid) { // Update the weight for bTest // JITDUMP("Reducing profile weight of " FMT_BB " from " FMT_WT " to " FMT_WT "\n", bTest->bbNum, weightTest, weightNext); bTest->bbWeight = weightNext; // Determine the new edge weights. // // We project the next/jump ratio for block and bTest by using // the original likelihoods out of bTest. // // Note "next" is the loop top block, not bTest's bbNext, // we'll call this latter block "after". // weight_t const testToNextLikelihood = min(1.0, weightNext / weightTest); weight_t const testToAfterLikelihood = 1.0 - testToNextLikelihood; // Adjust edges out of bTest (which now has weight weightNext) // weight_t const testToNextWeight = weightNext * testToNextLikelihood; weight_t const testToAfterWeight = weightNext * testToAfterLikelihood; flowList* const edgeTestToNext = fgGetPredForBlock(bTest->bbJumpDest, bTest); flowList* const edgeTestToAfter = fgGetPredForBlock(bTest->bbNext, bTest); JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (iterate loop)\n", bTest->bbNum, bTest->bbJumpDest->bbNum, testToNextWeight); JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (exit loop)\n", bTest->bbNum, bTest->bbNext->bbNum, testToAfterWeight); edgeTestToNext->setEdgeWeights(testToNextWeight, testToNextWeight, bTest->bbJumpDest); edgeTestToAfter->setEdgeWeights(testToAfterWeight, testToAfterWeight, bTest->bbNext); // Adjust edges out of block, using the same distribution. // JITDUMP("Profile weight of " FMT_BB " remains unchanged at " FMT_WT "\n", block->bbNum, weightBlock); weight_t const blockToNextLikelihood = testToNextLikelihood; weight_t const blockToAfterLikelihood = testToAfterLikelihood; weight_t const blockToNextWeight = weightBlock * blockToNextLikelihood; weight_t const blockToAfterWeight = weightBlock * blockToAfterLikelihood; flowList* const edgeBlockToNext = fgGetPredForBlock(bNewCond->bbNext, bNewCond); flowList* const edgeBlockToAfter = fgGetPredForBlock(bNewCond->bbJumpDest, bNewCond); JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (enter loop)\n", bNewCond->bbNum, bNewCond->bbNext->bbNum, blockToNextWeight); JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (avoid loop)\n", bNewCond->bbNum, bNewCond->bbJumpDest->bbNum, blockToAfterWeight); edgeBlockToNext->setEdgeWeights(blockToNextWeight, blockToNextWeight, bNewCond->bbNext); edgeBlockToAfter->setEdgeWeights(blockToAfterWeight, blockToAfterWeight, bNewCond->bbJumpDest); #ifdef DEBUG // Verify profile for the two target blocks is consistent. 
// fgDebugCheckIncomingProfileData(bNewCond->bbNext); fgDebugCheckIncomingProfileData(bNewCond->bbJumpDest); #endif // DEBUG } #ifdef DEBUG if (verbose) { printf("\nDuplicated loop exit block at " FMT_BB " for loop (" FMT_BB " - " FMT_BB ")\n", bNewCond->bbNum, bNewCond->bbNext->bbNum, bTest->bbNum); printf("Estimated code size expansion is %d\n", estDupCostSz); fgDumpBlock(bNewCond); fgDumpBlock(bTest); } #endif // DEBUG return true; } //----------------------------------------------------------------------------- // optInvertLoops: invert while loops in the method // // Returns: // suitable phase status // PhaseStatus Compiler::optInvertLoops() { noway_assert(opts.OptimizationEnabled()); noway_assert(fgModified == false); #if defined(OPT_CONFIG) if (!JitConfig.JitDoLoopInversion()) { JITDUMP("Loop inversion disabled\n"); return PhaseStatus::MODIFIED_NOTHING; } #endif // OPT_CONFIG if (compCodeOpt() == SMALL_CODE) { return PhaseStatus::MODIFIED_NOTHING; } bool madeChanges = false; // Assume no changes made for (BasicBlock* const block : Blocks()) { // Make sure the appropriate fields are initialized // if (block->bbWeight == BB_ZERO_WEIGHT) { // Zero weighted block can't have a LOOP_HEAD flag noway_assert(block->isLoopHead() == false); continue; } if (optInvertWhileLoop(block)) { madeChanges = true; } } if (fgModified) { // Reset fgModified here as we've done a consistent set of edits. // fgModified = false; } return madeChanges ? PhaseStatus::MODIFIED_EVERYTHING : PhaseStatus::MODIFIED_NOTHING; } //----------------------------------------------------------------------------- // optOptimizeLayout: reorder blocks to reduce cost of control flow // // Returns: // suitable phase status // PhaseStatus Compiler::optOptimizeLayout() { noway_assert(opts.OptimizationEnabled()); noway_assert(fgModified == false); bool madeChanges = false; const bool allowTailDuplication = true; madeChanges |= fgUpdateFlowGraph(allowTailDuplication); madeChanges |= fgReorderBlocks(); madeChanges |= fgUpdateFlowGraph(); // fgReorderBlocks can cause IR changes even if it does not modify // the flow graph. It calls gtPrepareCost which can cause operand swapping. // Work around this for now. // // Note phase status only impacts dumping and checking done post-phase, // it has no impact on a release build. // madeChanges = true; return madeChanges ? PhaseStatus::MODIFIED_EVERYTHING : PhaseStatus::MODIFIED_NOTHING; } //------------------------------------------------------------------------ // optMarkLoopHeads: Mark all potential loop heads as BBF_LOOP_HEAD. A potential loop head is a block // targeted by a lexical back edge, where the source of the back edge is reachable from the block. // Note that if there are no lexical back edges, there can't be any loops. // // If there are any potential loop heads, set `fgHasLoops` to `true`. // // Assumptions: // The reachability sets must be computed and valid. // void Compiler::optMarkLoopHeads() { #ifdef DEBUG if (verbose) { printf("*************** In optMarkLoopHeads()\n"); } assert(!fgCheapPredsValid); assert(fgReachabilitySetsValid); fgDebugCheckBBNumIncreasing(); int loopHeadsMarked = 0; #endif bool hasLoops = false; for (BasicBlock* const block : Blocks()) { // Set BBF_LOOP_HEAD if we have backwards branches to this block. unsigned blockNum = block->bbNum; for (BasicBlock* const predBlock : block->PredBlocks()) { if (blockNum <= predBlock->bbNum) { if (predBlock->bbJumpKind == BBJ_CALLFINALLY) { // Loops never have BBJ_CALLFINALLY as the source of their "back edge". 
continue; } // If block can reach predBlock then we have a loop head if (BlockSetOps::IsMember(this, predBlock->bbReach, blockNum)) { hasLoops = true; block->bbFlags |= BBF_LOOP_HEAD; INDEBUG(++loopHeadsMarked); break; // No need to look at more `block` predecessors } } } } JITDUMP("%d loop heads marked\n", loopHeadsMarked); fgHasLoops = hasLoops; } //----------------------------------------------------------------------------- // optResetLoopInfo: reset all loop info in preparation for rebuilding the loop table, or preventing // future phases from accessing loop-related data. // void Compiler::optResetLoopInfo() { #ifdef DEBUG if (verbose) { printf("*************** In optResetLoopInfo()\n"); } #endif optLoopCount = 0; // This will force the table to be rebuilt loopAlignCandidates = 0; // This will cause users to crash if they use the table when it is considered empty. // TODO: the loop table is always allocated as the same (maximum) size, so this is wasteful. // We could zero it out (possibly only in DEBUG) to be paranoid, but there's no reason to // force it to be re-allocated. optLoopTable = nullptr; for (BasicBlock* const block : Blocks()) { // If the block weight didn't come from profile data, reset it so it can be calculated again. if (!block->hasProfileWeight()) { block->bbWeight = BB_UNITY_WEIGHT; block->bbFlags &= ~BBF_RUN_RARELY; } block->bbFlags &= ~BBF_LOOP_FLAGS; block->bbNatLoopNum = BasicBlock::NOT_IN_LOOP; } } //----------------------------------------------------------------------------- // optFindAndScaleGeneralLoopBlocks: scale block weights based on loop nesting depth. // Note that this uses a very general notion of "loop": any block targeted by a reachable // back-edge is considered a loop. // void Compiler::optFindAndScaleGeneralLoopBlocks() { #ifdef DEBUG if (verbose) { printf("*************** In optFindAndScaleGeneralLoopBlocks()\n"); } #endif // This code depends on block number ordering. INDEBUG(fgDebugCheckBBNumIncreasing()); unsigned generalLoopCount = 0; // We will use the following terminology: // top - the first basic block in the loop (i.e. the head of the backward edge) // bottom - the last block in the loop (i.e. the block from which we jump to the top) // lastBottom - used when we have multiple back edges to the same top for (BasicBlock* const top : Blocks()) { // Only consider `top` blocks already determined to be potential loop heads. if (!top->isLoopHead()) { continue; } BasicBlock* foundBottom = nullptr; for (BasicBlock* const bottom : top->PredBlocks()) { // Is this a loop candidate? - We look for "back edges" // Is this a backward edge? (from BOTTOM to TOP) if (top->bbNum > bottom->bbNum) { continue; } // We only consider back-edges that are BBJ_COND or BBJ_ALWAYS for loops. 
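// (This also filters out BBJ_CALLFINALLY edges, matching the exclusion in optMarkLoopHeads.)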
if ((bottom->bbJumpKind != BBJ_COND) && (bottom->bbJumpKind != BBJ_ALWAYS)) { continue; } /* the top block must be able to reach the bottom block */ if (!fgReachable(top, bottom)) { continue; } /* Found a new loop, record the longest backedge in foundBottom */ if ((foundBottom == nullptr) || (bottom->bbNum > foundBottom->bbNum)) { foundBottom = bottom; } } if (foundBottom) { generalLoopCount++; /* Mark all blocks between 'top' and 'bottom' */ optScaleLoopBlocks(top, foundBottom); } // We track at most 255 loops if (generalLoopCount == 255) { #if COUNT_LOOPS totalUnnatLoopOverflows++; #endif break; } } JITDUMP("\nFound a total of %d general loops.\n", generalLoopCount); #if COUNT_LOOPS totalUnnatLoopCount += generalLoopCount; #endif } //----------------------------------------------------------------------------- // optFindLoops: find loops in the function. // // The JIT recognizes two types of loops in a function: natural loops and "general" (or "unnatural") loops. // Natural loops are those which get added to the loop table. Most downstream optimizations require // using natural loops. See `optFindNaturalLoops` for a definition of the criteria for recognizing a natural loop. // A general loop is defined as a lexical (program order) range of blocks where a later block branches to an // earlier block (that is, there is a back edge in the flow graph), and the later block is reachable from the earlier // block. General loops are used for weighting flow graph blocks (when there is no block profile data), as well as // for determining if we require fully interruptible GC information. // // Notes: // Also (re)sets all non-IBC block weights, and marks loops potentially needing alignment padding. // void Compiler::optFindLoops() { #ifdef DEBUG if (verbose) { printf("*************** In optFindLoops()\n"); } #endif noway_assert(opts.OptimizationEnabled()); assert(fgDomsComputed); optMarkLoopHeads(); // Were there any potential loops in the flow graph? if (fgHasLoops) { optFindNaturalLoops(); optFindAndScaleGeneralLoopBlocks(); optIdentifyLoopsForAlignment(); // Check if any of the loops need alignment } #ifdef DEBUG fgDebugCheckLoopTable(); #endif optLoopsMarked = true; } //----------------------------------------------------------------------------- // optFindLoopsPhase: The wrapper function for the "find loops" phase. // PhaseStatus Compiler::optFindLoopsPhase() { optFindLoops(); return PhaseStatus::MODIFIED_EVERYTHING; } /***************************************************************************** * * Determine the kind of interference for the call. 
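 *
 *  Roughly, summarizing the switch below: helpers that store object references (e.g.
 *  CORINFO_HELP_SETFIELDOBJ, CORINFO_HELP_ARRADDR_ST) interfere only with GC-ref indirections,
 *  the scalar SETFIELD helpers only with scalar indirections, struct copy/init helpers with all
 *  indirections, any other helper with nothing, and a non-helper call with everything.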
*/ /* static */ inline Compiler::callInterf Compiler::optCallInterf(GenTreeCall* call) { // if not a helper, kills everything if (call->gtCallType != CT_HELPER) { return CALLINT_ALL; } // setfield and array address store kill all indirections switch (eeGetHelperNum(call->gtCallMethHnd)) { case CORINFO_HELP_ASSIGN_REF: // Not strictly needed as we don't make a GT_CALL with this case CORINFO_HELP_CHECKED_ASSIGN_REF: // Not strictly needed as we don't make a GT_CALL with this case CORINFO_HELP_ASSIGN_BYREF: // Not strictly needed as we don't make a GT_CALL with this case CORINFO_HELP_SETFIELDOBJ: case CORINFO_HELP_ARRADDR_ST: return CALLINT_REF_INDIRS; case CORINFO_HELP_SETFIELDFLOAT: case CORINFO_HELP_SETFIELDDOUBLE: case CORINFO_HELP_SETFIELD8: case CORINFO_HELP_SETFIELD16: case CORINFO_HELP_SETFIELD32: case CORINFO_HELP_SETFIELD64: return CALLINT_SCL_INDIRS; case CORINFO_HELP_ASSIGN_STRUCT: // Not strictly needed as we don't use this case CORINFO_HELP_MEMSET: // Not strictly needed as we don't make a GT_CALL with this case CORINFO_HELP_MEMCPY: // Not strictly needed as we don't make a GT_CALL with this case CORINFO_HELP_SETFIELDSTRUCT: return CALLINT_ALL_INDIRS; default: break; } // other helpers kill nothing return CALLINT_NONE; } /***************************************************************************** * * See if the given tree can be computed in the given precision (which must * be smaller than the type of the tree for this to make sense). If 'doit' * is false, we merely check to see whether narrowing is possible; if we * get called with 'doit' being true, we actually perform the narrowing. */ bool Compiler::optNarrowTree(GenTree* tree, var_types srct, var_types dstt, ValueNumPair vnpNarrow, bool doit) { genTreeOps oper; unsigned kind; noway_assert(tree); noway_assert(genActualType(tree->gtType) == genActualType(srct)); /* Assume we're only handling integer types */ noway_assert(varTypeIsIntegral(srct)); noway_assert(varTypeIsIntegral(dstt)); unsigned srcSize = genTypeSize(srct); unsigned dstSize = genTypeSize(dstt); /* dstt must be smaller than srct to narrow */ if (dstSize >= srcSize) { return false; } /* Figure out what kind of a node we have */ oper = tree->OperGet(); kind = tree->OperKind(); if (oper == GT_ASG) { noway_assert(doit == false); return false; } ValueNumPair NoVNPair = ValueNumPair(); if (kind & GTK_LEAF) { switch (oper) { /* Constants can usually be narrowed by changing their value */ CLANG_FORMAT_COMMENT_ANCHOR; #ifndef TARGET_64BIT __int64 lval; __int64 lmask; case GT_CNS_LNG: lval = tree->AsIntConCommon()->LngValue(); lmask = 0; switch (dstt) { case TYP_BYTE: lmask = 0x0000007F; break; case TYP_BOOL: case TYP_UBYTE: lmask = 0x000000FF; break; case TYP_SHORT: lmask = 0x00007FFF; break; case TYP_USHORT: lmask = 0x0000FFFF; break; case TYP_INT: lmask = 0x7FFFFFFF; break; case TYP_UINT: lmask = 0xFFFFFFFF; break; default: return false; } if ((lval & lmask) != lval) return false; if (doit) { tree->BashToConst(static_cast<int32_t>(lval)); if (vnStore != nullptr) { fgValueNumberTreeConst(tree); } } return true; #endif case GT_CNS_INT: ssize_t ival; ival = tree->AsIntCon()->gtIconVal; ssize_t imask; imask = 0; switch (dstt) { case TYP_BYTE: imask = 0x0000007F; break; case TYP_BOOL: case TYP_UBYTE: imask = 0x000000FF; break; case TYP_SHORT: imask = 0x00007FFF; break; case TYP_USHORT: imask = 0x0000FFFF; break; #ifdef TARGET_64BIT case TYP_INT: imask = 0x7FFFFFFF; break; case TYP_UINT: imask = 0xFFFFFFFF; break; #endif // TARGET_64BIT default: return false; } if ((ival & 
imask) != ival) { return false; } #ifdef TARGET_64BIT if (doit) { tree->gtType = TYP_INT; tree->AsIntCon()->gtIconVal = (int)ival; if (vnStore != nullptr) { fgValueNumberTreeConst(tree); } } #endif // TARGET_64BIT return true; /* Operands that are in memory can usually be narrowed simply by changing their gtType */ case GT_LCL_VAR: /* We only allow narrowing long -> int for a GT_LCL_VAR */ if (dstSize == sizeof(int)) { goto NARROW_IND; } break; case GT_CLS_VAR: case GT_LCL_FLD: goto NARROW_IND; default: break; } noway_assert(doit == false); return false; } if (kind & (GTK_BINOP | GTK_UNOP)) { GenTree* op1; op1 = tree->AsOp()->gtOp1; GenTree* op2; op2 = tree->AsOp()->gtOp2; switch (tree->gtOper) { case GT_AND: noway_assert(genActualType(tree->gtType) == genActualType(op1->gtType)); noway_assert(genActualType(tree->gtType) == genActualType(op2->gtType)); GenTree* opToNarrow; opToNarrow = nullptr; GenTree** otherOpPtr; otherOpPtr = nullptr; bool foundOperandThatBlocksNarrowing; foundOperandThatBlocksNarrowing = false; // If 'dstt' is unsigned and one of the operands can be narrowed into 'dsst', // the result of the GT_AND will also fit into 'dstt' and can be narrowed. // The same is true if one of the operands is an int const and can be narrowed into 'dsst'. if (!gtIsActiveCSE_Candidate(op2) && ((op2->gtOper == GT_CNS_INT) || varTypeIsUnsigned(dstt))) { if (optNarrowTree(op2, srct, dstt, NoVNPair, false)) { opToNarrow = op2; otherOpPtr = &tree->AsOp()->gtOp1; } else { foundOperandThatBlocksNarrowing = true; } } if ((opToNarrow == nullptr) && !gtIsActiveCSE_Candidate(op1) && ((op1->gtOper == GT_CNS_INT) || varTypeIsUnsigned(dstt))) { if (optNarrowTree(op1, srct, dstt, NoVNPair, false)) { opToNarrow = op1; otherOpPtr = &tree->AsOp()->gtOp2; } else { foundOperandThatBlocksNarrowing = true; } } if (opToNarrow != nullptr) { // We will change the type of the tree and narrow opToNarrow // if (doit) { tree->gtType = genActualType(dstt); tree->SetVNs(vnpNarrow); optNarrowTree(opToNarrow, srct, dstt, NoVNPair, true); // We may also need to cast away the upper bits of *otherOpPtr if (srcSize == 8) { assert(tree->gtType == TYP_INT); GenTree* castOp = gtNewCastNode(TYP_INT, *otherOpPtr, false, TYP_INT); #ifdef DEBUG castOp->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif *otherOpPtr = castOp; } } return true; } if (foundOperandThatBlocksNarrowing) { noway_assert(doit == false); return false; } goto COMMON_BINOP; case GT_ADD: case GT_MUL: if (tree->gtOverflow() || varTypeIsSmall(dstt)) { noway_assert(doit == false); return false; } FALLTHROUGH; case GT_OR: case GT_XOR: noway_assert(genActualType(tree->gtType) == genActualType(op1->gtType)); noway_assert(genActualType(tree->gtType) == genActualType(op2->gtType)); COMMON_BINOP: if (gtIsActiveCSE_Candidate(op1) || gtIsActiveCSE_Candidate(op2) || !optNarrowTree(op1, srct, dstt, NoVNPair, doit) || !optNarrowTree(op2, srct, dstt, NoVNPair, doit)) { noway_assert(doit == false); return false; } /* Simply change the type of the tree */ if (doit) { if (tree->gtOper == GT_MUL && (tree->gtFlags & GTF_MUL_64RSLT)) { tree->gtFlags &= ~GTF_MUL_64RSLT; } tree->gtType = genActualType(dstt); tree->SetVNs(vnpNarrow); } return true; case GT_IND: NARROW_IND: if ((dstSize > genTypeSize(tree->gtType)) && (varTypeIsUnsigned(dstt) && !varTypeIsUnsigned(tree->gtType))) { return false; } /* Simply change the type of the tree */ if (doit && (dstSize <= genTypeSize(tree->gtType))) { if (!varTypeIsSmall(dstt)) { dstt = varTypeToSigned(dstt); } tree->gtType = dstt; 
tree->SetVNs(vnpNarrow); /* Make sure we don't mess up the variable type */ if ((oper == GT_LCL_VAR) || (oper == GT_LCL_FLD)) { tree->gtFlags |= GTF_VAR_CAST; } } return true; case GT_EQ: case GT_NE: case GT_LT: case GT_LE: case GT_GT: case GT_GE: /* These can always be narrowed since they only represent 0 or 1 */ return true; case GT_CAST: { var_types cast = tree->CastToType(); var_types oprt = op1->TypeGet(); unsigned oprSize = genTypeSize(oprt); if (cast != srct) { return false; } if (varTypeIsIntegralOrI(dstt) != varTypeIsIntegralOrI(oprt)) { return false; } if (tree->gtOverflow()) { return false; } /* Is this a cast from the type we're narrowing to or a smaller one? */ if (oprSize <= dstSize) { /* Bash the target type of the cast */ if (doit) { if (!varTypeIsSmall(dstt)) { dstt = varTypeToSigned(dstt); } if ((oprSize == dstSize) && ((varTypeIsUnsigned(dstt) == varTypeIsUnsigned(oprt)) || !varTypeIsSmall(dstt))) { // Same size and there is no signedness mismatch for small types: change the CAST // into a NOP JITDUMP("Cast operation has no effect, bashing [%06d] GT_CAST into a GT_NOP.\n", dspTreeID(tree)); tree->ChangeOper(GT_NOP); tree->gtType = dstt; // Clear the GTF_UNSIGNED flag, as it may have been set on the cast node tree->gtFlags &= ~GTF_UNSIGNED; tree->AsOp()->gtOp2 = nullptr; tree->gtVNPair = op1->gtVNPair; // Set to op1's ValueNumber } else { // oprSize is smaller or there is a signedness mismatch for small types // Change the CastToType in the GT_CAST node tree->CastToType() = dstt; // The result type of a GT_CAST is never a small type. // Use genActualType to widen dstt when it is a small types. tree->gtType = genActualType(dstt); tree->SetVNs(vnpNarrow); } } return true; } } return false; case GT_COMMA: if (!gtIsActiveCSE_Candidate(op2) && optNarrowTree(op2, srct, dstt, vnpNarrow, doit)) { /* Simply change the type of the tree */ if (doit) { tree->gtType = genActualType(dstt); tree->SetVNs(vnpNarrow); } return true; } return false; default: noway_assert(doit == false); return false; } } return false; } /***************************************************************************** * * The following logic figures out whether the given variable is assigned * somewhere in a list of basic blocks (or in an entire loop). */ Compiler::fgWalkResult Compiler::optIsVarAssgCB(GenTree** pTree, fgWalkData* data) { GenTree* tree = *pTree; if (tree->OperIsSsaDef()) { isVarAssgDsc* desc = (isVarAssgDsc*)data->pCallbackData; assert(desc && desc->ivaSelf == desc); GenTree* dest = nullptr; if (tree->OperIs(GT_CALL)) { desc->ivaMaskCall = optCallInterf(tree->AsCall()); dest = tree->AsCall()->GetLclRetBufArgNode(); if (dest == nullptr) { return WALK_CONTINUE; } dest = dest->AsOp()->gtOp1; } else { dest = tree->AsOp()->gtOp1; } genTreeOps destOper = dest->OperGet(); if (destOper == GT_LCL_VAR) { unsigned tvar = dest->AsLclVarCommon()->GetLclNum(); if (tvar < lclMAX_ALLSET_TRACKED) { AllVarSetOps::AddElemD(data->compiler, desc->ivaMaskVal, tvar); } else { desc->ivaMaskIncomplete = true; } if (tvar == desc->ivaVar) { if (tree != desc->ivaSkip) { return WALK_ABORT; } } } else if (destOper == GT_LCL_FLD) { /* We can't track every field of every var. Moreover, indirections may access different parts of the var as different (but overlapping) fields. So just treat them as indirect accesses */ // unsigned lclNum = dest->AsLclFld()->GetLclNum(); // noway_assert(lvaTable[lclNum].lvAddrTaken); varRefKinds refs = varTypeIsGC(tree->TypeGet()) ? 
VR_IND_REF : VR_IND_SCL; desc->ivaMaskInd = varRefKinds(desc->ivaMaskInd | refs); } else if (destOper == GT_CLS_VAR) { desc->ivaMaskInd = varRefKinds(desc->ivaMaskInd | VR_GLB_VAR); } else if (destOper == GT_IND) { /* Set the proper indirection bits */ varRefKinds refs = varTypeIsGC(tree->TypeGet()) ? VR_IND_REF : VR_IND_SCL; desc->ivaMaskInd = varRefKinds(desc->ivaMaskInd | refs); } } return WALK_CONTINUE; } /*****************************************************************************/ bool Compiler::optIsVarAssigned(BasicBlock* beg, BasicBlock* end, GenTree* skip, unsigned var) { bool result; isVarAssgDsc desc; desc.ivaSkip = skip; #ifdef DEBUG desc.ivaSelf = &desc; #endif desc.ivaVar = var; desc.ivaMaskCall = CALLINT_NONE; AllVarSetOps::AssignNoCopy(this, desc.ivaMaskVal, AllVarSetOps::MakeEmpty(this)); for (;;) { noway_assert(beg != nullptr); for (Statement* const stmt : beg->Statements()) { if (fgWalkTreePre(stmt->GetRootNodePointer(), optIsVarAssgCB, &desc) != WALK_CONTINUE) { result = true; goto DONE; } } if (beg == end) { break; } beg = beg->bbNext; } result = false; DONE: return result; } /***************************************************************************** * Is "var" assigned in the loop "lnum" ? */ bool Compiler::optIsVarAssgLoop(unsigned lnum, unsigned var) { assert(lnum < optLoopCount); if (var < lclMAX_ALLSET_TRACKED) { ALLVARSET_TP vs(AllVarSetOps::MakeSingleton(this, var)); return optIsSetAssgLoop(lnum, vs) != 0; } else { return optIsVarAssigned(optLoopTable[lnum].lpHead->bbNext, optLoopTable[lnum].lpBottom, nullptr, var); } } /*****************************************************************************/ int Compiler::optIsSetAssgLoop(unsigned lnum, ALLVARSET_VALARG_TP vars, varRefKinds inds) { noway_assert(lnum < optLoopCount); LoopDsc* loop = &optLoopTable[lnum]; /* Do we already know what variables are assigned within this loop? */ if (!(loop->lpFlags & LPFLG_ASGVARS_YES)) { isVarAssgDsc desc; /* Prepare the descriptor used by the tree walker call-back */ desc.ivaVar = (unsigned)-1; desc.ivaSkip = nullptr; #ifdef DEBUG desc.ivaSelf = &desc; #endif AllVarSetOps::AssignNoCopy(this, desc.ivaMaskVal, AllVarSetOps::MakeEmpty(this)); desc.ivaMaskInd = VR_NONE; desc.ivaMaskCall = CALLINT_NONE; desc.ivaMaskIncomplete = false; /* Now walk all the statements of the loop */ for (BasicBlock* const block : loop->LoopBlocks()) { for (Statement* const stmt : block->NonPhiStatements()) { fgWalkTreePre(stmt->GetRootNodePointer(), optIsVarAssgCB, &desc); if (desc.ivaMaskIncomplete) { loop->lpFlags |= LPFLG_ASGVARS_INC; } } } AllVarSetOps::Assign(this, loop->lpAsgVars, desc.ivaMaskVal); loop->lpAsgInds = desc.ivaMaskInd; loop->lpAsgCall = desc.ivaMaskCall; /* Now we know what variables are assigned in the loop */ loop->lpFlags |= LPFLG_ASGVARS_YES; } /* Now we can finally test the caller's mask against the loop's */ if (!AllVarSetOps::IsEmptyIntersection(this, loop->lpAsgVars, vars) || (loop->lpAsgInds & inds)) { return 1; } switch (loop->lpAsgCall) { case CALLINT_ALL: /* Can't hoist if the call might have side effect on an indirection. */ if (loop->lpAsgInds != VR_NONE) { return 1; } break; case CALLINT_REF_INDIRS: /* Can't hoist if the call might have side effect on an ref indirection. */ if (loop->lpAsgInds & VR_IND_REF) { return 1; } break; case CALLINT_SCL_INDIRS: /* Can't hoist if the call might have side effect on an non-ref indirection. 
*/ if (loop->lpAsgInds & VR_IND_SCL) { return 1; } break; case CALLINT_ALL_INDIRS: /* Can't hoist if the call might have side effect on any indirection. */ if (loop->lpAsgInds & (VR_IND_REF | VR_IND_SCL)) { return 1; } break; case CALLINT_NONE: /* Other helpers kill nothing */ break; default: noway_assert(!"Unexpected lpAsgCall value"); } return 0; } void Compiler::optPerformHoistExpr(GenTree* origExpr, BasicBlock* exprBb, unsigned lnum) { assert(exprBb != nullptr); #ifdef DEBUG if (verbose) { printf("\nHoisting a copy of "); printTreeID(origExpr); printf(" from " FMT_BB " into PreHeader " FMT_BB " for loop " FMT_LP " <" FMT_BB ".." FMT_BB ">:\n", exprBb->bbNum, optLoopTable[lnum].lpHead->bbNum, lnum, optLoopTable[lnum].lpTop->bbNum, optLoopTable[lnum].lpBottom->bbNum); gtDispTree(origExpr); printf("\n"); } #endif // Create a copy of the expression and mark it for CSE's. GenTree* hoistExpr = gtCloneExpr(origExpr, GTF_MAKE_CSE); // The hoist Expr does not have to computed into a specific register, // so clear the RegNum if it was set in the original expression hoistExpr->ClearRegNum(); // Copy any loop memory dependence. optCopyLoopMemoryDependence(origExpr, hoistExpr); // At this point we should have a cloned expression, marked with the GTF_MAKE_CSE flag assert(hoistExpr != origExpr); assert(hoistExpr->gtFlags & GTF_MAKE_CSE); GenTree* hoist = hoistExpr; // The value of the expression isn't used (unless it's an assignment). if (hoistExpr->OperGet() != GT_ASG) { hoist = gtUnusedValNode(hoistExpr); } /* Put the statement in the preheader */ INDEBUG(optLoopTable[lnum].lpValidatePreHeader()); BasicBlock* preHead = optLoopTable[lnum].lpHead; // fgMorphTree requires that compCurBB be the block that contains // (or in this case, will contain) the expression. compCurBB = preHead; hoist = fgMorphTree(hoist); preHead->bbFlags |= (exprBb->bbFlags & (BBF_HAS_IDX_LEN | BBF_HAS_NULLCHECK)); Statement* hoistStmt = gtNewStmt(hoist); // Simply append the statement at the end of the preHead's list. Statement* firstStmt = preHead->firstStmt(); if (firstStmt != nullptr) { /* append after last statement */ Statement* lastStmt = preHead->lastStmt(); assert(lastStmt->GetNextStmt() == nullptr); lastStmt->SetNextStmt(hoistStmt); hoistStmt->SetPrevStmt(lastStmt); firstStmt->SetPrevStmt(hoistStmt); } else { /* Empty pre-header - store the single statement in the block */ preHead->bbStmtList = hoistStmt; hoistStmt->SetPrevStmt(hoistStmt); } hoistStmt->SetNextStmt(nullptr); #ifdef DEBUG if (verbose) { printf("This hoisted copy placed in PreHeader (" FMT_BB "):\n", preHead->bbNum); gtDispTree(hoist); printf("\n"); } #endif if (fgStmtListThreaded) { gtSetStmtInfo(hoistStmt); fgSetStmtSeq(hoistStmt); } #ifdef DEBUG if (m_nodeTestData != nullptr) { // What is the depth of the loop "lnum"? 
ssize_t depth = 0; unsigned lnumIter = lnum; while (optLoopTable[lnumIter].lpParent != BasicBlock::NOT_IN_LOOP) { depth++; lnumIter = optLoopTable[lnumIter].lpParent; } NodeToTestDataMap* testData = GetNodeTestData(); TestLabelAndNum tlAndN; if (testData->Lookup(origExpr, &tlAndN) && tlAndN.m_tl == TL_LoopHoist) { if (tlAndN.m_num == -1) { printf("Node "); printTreeID(origExpr); printf(" was declared 'do not hoist', but is being hoisted.\n"); assert(false); } else if (tlAndN.m_num != depth) { printf("Node "); printTreeID(origExpr); printf(" was declared as hoistable from loop at nesting depth %d; actually hoisted from loop at depth " "%d.\n", tlAndN.m_num, depth); assert(false); } else { // We've correctly hoisted this, so remove the annotation. Later, we'll check for any remaining "must // hoist" annotations. testData->Remove(origExpr); // Now we insert an annotation to make sure that "hoistExpr" is actually CSE'd. tlAndN.m_tl = TL_CSE_Def; tlAndN.m_num = m_loopHoistCSEClass++; testData->Set(hoistExpr, tlAndN); } } } #endif #if LOOP_HOIST_STATS if (!m_curLoopHasHoistedExpression) { m_loopsWithHoistedExpressions++; m_curLoopHasHoistedExpression = true; } m_totalHoistedExpressions++; #endif // LOOP_HOIST_STATS } void Compiler::optHoistLoopCode() { // If we don't have any loops in the method then take an early out now. if (optLoopCount == 0) { JITDUMP("\nNo loops; no hoisting\n"); return; } #ifdef DEBUG unsigned jitNoHoist = JitConfig.JitNoHoist(); if (jitNoHoist > 0) { JITDUMP("\nJitNoHoist set; no hoisting\n"); return; } #endif #if 0 // The code in this #if has been useful in debugging loop hoisting issues, by // enabling selective enablement of the loop hoisting optimization according to // method hash. #ifdef DEBUG unsigned methHash = info.compMethodHash(); char* lostr = getenv("loophoisthashlo"); unsigned methHashLo = 0; if (lostr != NULL) { sscanf_s(lostr, "%x", &methHashLo); // methHashLo = (unsigned(atoi(lostr)) << 2); // So we don't have to use negative numbers. } char* histr = getenv("loophoisthashhi"); unsigned methHashHi = UINT32_MAX; if (histr != NULL) { sscanf_s(histr, "%x", &methHashHi); // methHashHi = (unsigned(atoi(histr)) << 2); // So we don't have to use negative numbers. } if (methHash < methHashLo || methHash > methHashHi) return; printf("Doing loop hoisting in %s (0x%x).\n", info.compFullName, methHash); #endif // DEBUG #endif // 0 -- debugging loop hoisting issues #ifdef DEBUG if (verbose) { printf("\n*************** In optHoistLoopCode()\n"); printf("Blocks/Trees before phase\n"); fgDispBasicBlocks(true); fgDispHandlerTab(); optPrintLoopTable(); } #endif // Consider all the loop nests, in outer-to-inner order (thus hoisting expressions outside the largest loop in which // they are invariant.) LoopHoistContext hoistCtxt(this); for (unsigned lnum = 0; lnum < optLoopCount; lnum++) { if (optLoopTable[lnum].lpFlags & LPFLG_REMOVED) { JITDUMP("\nLoop " FMT_LP " was removed\n", lnum); continue; } if (optLoopTable[lnum].lpParent == BasicBlock::NOT_IN_LOOP) { optHoistLoopNest(lnum, &hoistCtxt); } } #if DEBUG if (fgModified) { if (verbose) { printf("Blocks/Trees after optHoistLoopCode() modified flowgraph\n"); fgDispBasicBlocks(true); printf(""); } // Make sure that the predecessor lists are accurate fgDebugCheckBBlist(); } #endif #ifdef DEBUG // Test Data stuff.. // If we have no test data, early out. 
if (m_nodeTestData == nullptr) { return; } NodeToTestDataMap* testData = GetNodeTestData(); for (NodeToTestDataMap::KeyIterator ki = testData->Begin(); !ki.Equal(testData->End()); ++ki) { TestLabelAndNum tlAndN; GenTree* node = ki.Get(); bool b = testData->Lookup(node, &tlAndN); assert(b); if (tlAndN.m_tl != TL_LoopHoist) { continue; } // Otherwise, it is a loop hoist annotation. assert(tlAndN.m_num < 100); // >= 100 indicates nested static field address, should already have been moved. if (tlAndN.m_num >= 0) { printf("Node "); printTreeID(node); printf(" was declared 'must hoist', but has not been hoisted.\n"); assert(false); } } #endif // DEBUG } void Compiler::optHoistLoopNest(unsigned lnum, LoopHoistContext* hoistCtxt) { // Do this loop, then recursively do all nested loops. JITDUMP("\n%s " FMT_LP "\n", optLoopTable[lnum].lpParent == BasicBlock::NOT_IN_LOOP ? "Loop Nest" : "Nested Loop", lnum); #if LOOP_HOIST_STATS // Record stats m_curLoopHasHoistedExpression = false; m_loopsConsidered++; #endif // LOOP_HOIST_STATS optHoistThisLoop(lnum, hoistCtxt); VNSet* hoistedInCurLoop = hoistCtxt->ExtractHoistedInCurLoop(); if (optLoopTable[lnum].lpChild != BasicBlock::NOT_IN_LOOP) { // Add the ones hoisted in "lnum" to "hoistedInParents" for any nested loops. // TODO-Cleanup: we should have a set abstraction for loops. if (hoistedInCurLoop != nullptr) { for (VNSet::KeyIterator keys = hoistedInCurLoop->Begin(); !keys.Equal(hoistedInCurLoop->End()); ++keys) { #ifdef DEBUG bool b; assert(!hoistCtxt->m_hoistedInParentLoops.Lookup(keys.Get(), &b)); #endif hoistCtxt->m_hoistedInParentLoops.Set(keys.Get(), true); } } for (unsigned child = optLoopTable[lnum].lpChild; child != BasicBlock::NOT_IN_LOOP; child = optLoopTable[child].lpSibling) { optHoistLoopNest(child, hoistCtxt); } // Now remove them. // TODO-Cleanup: we should have a set abstraction for loops. if (hoistedInCurLoop != nullptr) { for (VNSet::KeyIterator keys = hoistedInCurLoop->Begin(); !keys.Equal(hoistedInCurLoop->End()); ++keys) { // Note that we asserted when we added these that they hadn't been members, so removing is appropriate. hoistCtxt->m_hoistedInParentLoops.Remove(keys.Get()); } } } } void Compiler::optHoistThisLoop(unsigned lnum, LoopHoistContext* hoistCtxt) { LoopDsc* pLoopDsc = &optLoopTable[lnum]; /* If loop was removed continue */ if (pLoopDsc->lpFlags & LPFLG_REMOVED) { JITDUMP(" ... not hoisting " FMT_LP ": removed\n", lnum); return; } // Ensure the per-loop sets/tables are empty. hoistCtxt->m_curLoopVnInvariantCache.RemoveAll(); #ifdef DEBUG if (verbose) { printf("optHoistThisLoop for loop " FMT_LP " <" FMT_BB ".." FMT_BB ">:\n", lnum, pLoopDsc->lpTop->bbNum, pLoopDsc->lpBottom->bbNum); printf(" Loop body %s a call\n", (pLoopDsc->lpFlags & LPFLG_CONTAINS_CALL) ? "contains" : "does not contain"); printf(" Loop has %s\n", (pLoopDsc->lpExitCnt == 1) ? "single exit" : "multiple exits"); } #endif VARSET_TP loopVars(VarSetOps::Intersection(this, pLoopDsc->lpVarInOut, pLoopDsc->lpVarUseDef)); pLoopDsc->lpVarInOutCount = VarSetOps::Count(this, pLoopDsc->lpVarInOut); pLoopDsc->lpLoopVarCount = VarSetOps::Count(this, loopVars); pLoopDsc->lpHoistedExprCount = 0; #ifndef TARGET_64BIT unsigned longVarsCount = VarSetOps::Count(this, lvaLongVars); if (longVarsCount > 0) { // Since 64-bit variables take up two registers on 32-bit targets, we increase // the Counts such that each TYP_LONG variable counts twice. 
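        // (E.g. on a 32-bit target, a loop with two TYP_LONG locals that are live into and out of the
        // loop ends up with those locals counted as 4 registers, not 2, in lpVarInOutCount.)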
// VARSET_TP loopLongVars(VarSetOps::Intersection(this, loopVars, lvaLongVars)); VARSET_TP inOutLongVars(VarSetOps::Intersection(this, pLoopDsc->lpVarInOut, lvaLongVars)); #ifdef DEBUG if (verbose) { printf("\n LONGVARS(%d)=", VarSetOps::Count(this, lvaLongVars)); lvaDispVarSet(lvaLongVars); } #endif pLoopDsc->lpLoopVarCount += VarSetOps::Count(this, loopLongVars); pLoopDsc->lpVarInOutCount += VarSetOps::Count(this, inOutLongVars); } #endif // !TARGET_64BIT #ifdef DEBUG if (verbose) { printf("\n USEDEF (%d)=", VarSetOps::Count(this, pLoopDsc->lpVarUseDef)); lvaDispVarSet(pLoopDsc->lpVarUseDef); printf("\n INOUT (%d)=", pLoopDsc->lpVarInOutCount); lvaDispVarSet(pLoopDsc->lpVarInOut); printf("\n LOOPVARS(%d)=", pLoopDsc->lpLoopVarCount); lvaDispVarSet(loopVars); printf("\n"); } #endif unsigned floatVarsCount = VarSetOps::Count(this, lvaFloatVars); if (floatVarsCount > 0) { VARSET_TP loopFPVars(VarSetOps::Intersection(this, loopVars, lvaFloatVars)); VARSET_TP inOutFPVars(VarSetOps::Intersection(this, pLoopDsc->lpVarInOut, lvaFloatVars)); pLoopDsc->lpLoopVarFPCount = VarSetOps::Count(this, loopFPVars); pLoopDsc->lpVarInOutFPCount = VarSetOps::Count(this, inOutFPVars); pLoopDsc->lpHoistedFPExprCount = 0; pLoopDsc->lpLoopVarCount -= pLoopDsc->lpLoopVarFPCount; pLoopDsc->lpVarInOutCount -= pLoopDsc->lpVarInOutFPCount; #ifdef DEBUG if (verbose) { printf(" INOUT-FP(%d)=", pLoopDsc->lpVarInOutFPCount); lvaDispVarSet(inOutFPVars); printf("\n LOOPV-FP(%d)=", pLoopDsc->lpLoopVarFPCount); lvaDispVarSet(loopFPVars); printf("\n"); } #endif } else // (floatVarsCount == 0) { pLoopDsc->lpLoopVarFPCount = 0; pLoopDsc->lpVarInOutFPCount = 0; pLoopDsc->lpHoistedFPExprCount = 0; } // Find the set of definitely-executed blocks. // Ideally, the definitely-executed blocks are the ones that post-dominate the entry block. // Until we have post-dominators, we'll special-case for single-exit blocks. // // Todo: it is not clear if this is a correctness requirement or a profitability heuristic. // It seems like the latter. Ideally have enough safeguards to prevent hoisting exception // or side-effect dependent things. // // We really should consider hoisting from conditionally executed blocks, if they are frequently executed // and it is safe to evaluate the tree early. // // In particular if we have a loop nest, when scanning the outer loop we should consider hoisting from blocks // in enclosed loops. However, this is likely to scale poorly, and we really should instead start // hoisting inner to outer. // ArrayStack<BasicBlock*> defExec(getAllocatorLoopHoist()); if (pLoopDsc->lpExitCnt == 1) { assert(pLoopDsc->lpExit != nullptr); JITDUMP(" Only considering hoisting in blocks that dominate exit block " FMT_BB "\n", pLoopDsc->lpExit->bbNum); BasicBlock* cur = pLoopDsc->lpExit; // Push dominators, until we reach "entry" or exit the loop. while (cur != nullptr && pLoopDsc->lpContains(cur) && cur != pLoopDsc->lpEntry) { defExec.Push(cur); cur = cur->bbIDom; } // If we didn't reach the entry block, give up and *just* push the entry block. if (cur != pLoopDsc->lpEntry) { JITDUMP(" -- odd, we didn't reach entry from exit via dominators. Only considering hoisting in entry " "block " FMT_BB "\n", pLoopDsc->lpEntry->bbNum); defExec.Reset(); } defExec.Push(pLoopDsc->lpEntry); } else // More than one exit { JITDUMP(" only considering hoisting in entry block " FMT_BB "\n", pLoopDsc->lpEntry->bbNum); // We'll assume that only the entry block is definitely executed. // We could in the future do better. 
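        // Without post-dominator information we cannot cheaply prove that any other block executes on
        // every iteration once there are multiple exits, so the entry block is the only one we scan.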
defExec.Push(pLoopDsc->lpEntry); } optHoistLoopBlocks(lnum, &defExec, hoistCtxt); } bool Compiler::optIsProfitableToHoistTree(GenTree* tree, unsigned lnum) { LoopDsc* pLoopDsc = &optLoopTable[lnum]; bool loopContainsCall = (pLoopDsc->lpFlags & LPFLG_CONTAINS_CALL) != 0; int availRegCount; int hoistedExprCount; int loopVarCount; int varInOutCount; if (varTypeIsFloating(tree)) { hoistedExprCount = pLoopDsc->lpHoistedFPExprCount; loopVarCount = pLoopDsc->lpLoopVarFPCount; varInOutCount = pLoopDsc->lpVarInOutFPCount; availRegCount = CNT_CALLEE_SAVED_FLOAT; if (!loopContainsCall) { availRegCount += CNT_CALLEE_TRASH_FLOAT - 1; } #ifdef TARGET_ARM // For ARM each double takes two FP registers // For now on ARM we won't track singles/doubles // and instead just assume that we always have doubles. // availRegCount /= 2; #endif } else { hoistedExprCount = pLoopDsc->lpHoistedExprCount; loopVarCount = pLoopDsc->lpLoopVarCount; varInOutCount = pLoopDsc->lpVarInOutCount; availRegCount = CNT_CALLEE_SAVED - 1; if (!loopContainsCall) { availRegCount += CNT_CALLEE_TRASH - 1; } #ifndef TARGET_64BIT // For our 32-bit targets Long types take two registers. if (varTypeIsLong(tree->TypeGet())) { availRegCount = (availRegCount + 1) / 2; } #endif } // decrement the availRegCount by the count of expression that we have already hoisted. availRegCount -= hoistedExprCount; // the variables that are read/written inside the loop should // always be a subset of the InOut variables for the loop assert(loopVarCount <= varInOutCount); // When loopVarCount >= availRegCount we believe that all of the // available registers will get used to hold LclVars inside the loop. // This pessimistically assumes that each loopVar has a conflicting // lifetime with every other loopVar. // For this case we will hoist the expression only if is profitable // to place it in a stack home location (GetCostEx() >= 2*IND_COST_EX) // as we believe it will be placed in the stack or one of the other // loopVars will be spilled into the stack // if (loopVarCount >= availRegCount) { // Don't hoist expressions that are not heavy: tree->GetCostEx() < (2*IND_COST_EX) if (tree->GetCostEx() < (2 * IND_COST_EX)) { JITDUMP(" tree cost too low: %d < %d (loopVarCount %u >= availableRegCount %u)\n", tree->GetCostEx(), 2 * IND_COST_EX, loopVarCount, availRegCount); return false; } } // When varInOutCount < availRegCount we are know that there are // some available register(s) when we enter the loop body. // When varInOutCount == availRegCount there often will be a register // available when we enter the loop body, since a loop often defines a // LclVar on exit or there is often at least one LclVar that is worth // spilling to the stack to make way for this hoisted expression. 
// So we are willing hoist an expression with GetCostEx() == MIN_CSE_COST // if (varInOutCount > availRegCount) { // Don't hoist expressions that barely meet CSE cost requirements: tree->GetCostEx() == MIN_CSE_COST if (tree->GetCostEx() <= MIN_CSE_COST + 1) { JITDUMP(" tree not good CSE: %d <= %d (varInOutCount %u > availableRegCount %u)\n", tree->GetCostEx(), 2 * MIN_CSE_COST + 1, varInOutCount, availRegCount) return false; } } return true; } //------------------------------------------------------------------------ // optRecordLoopMemoryDependence: record that tree's value number // is dependent on a particular memory VN // // Arguments: // tree -- tree in question // block -- block containing tree // memoryVN -- VN for a "map" from a select operation encounterd // while computing the tree's VN // // Notes: // Only tracks trees in loops, and memory updates in the same loop nest. // So this is a coarse-grained dependence that is only usable for // hoisting tree out of its enclosing loops. // void Compiler::optRecordLoopMemoryDependence(GenTree* tree, BasicBlock* block, ValueNum memoryVN) { // If tree is not in a loop, we don't need to track its loop dependence. // unsigned const loopNum = block->bbNatLoopNum; if (loopNum == BasicBlock::NOT_IN_LOOP) { return; } // Find the loop associated with this memory VN. // unsigned updateLoopNum = vnStore->LoopOfVN(memoryVN); if (updateLoopNum >= BasicBlock::MAX_LOOP_NUM) { // There should be only two special non-loop loop nums. // assert((updateLoopNum == BasicBlock::MAX_LOOP_NUM) || (updateLoopNum == BasicBlock::NOT_IN_LOOP)); // memoryVN defined outside of any loop, we can ignore. // JITDUMP(" ==> Not updating loop memory dependence of [%06u], memory " FMT_VN " not defined in a loop\n", dspTreeID(tree), memoryVN); return; } // If the loop was removed, then record the dependence in the nearest enclosing loop, if any. // while ((optLoopTable[updateLoopNum].lpFlags & LPFLG_REMOVED) != 0) { unsigned const updateParentLoopNum = optLoopTable[updateLoopNum].lpParent; if (updateParentLoopNum == BasicBlock::NOT_IN_LOOP) { // Memory VN was defined in a loop, but no longer. // JITDUMP(" ==> Not updating loop memory dependence of [%06u], memory " FMT_VN " no longer defined in a loop\n", dspTreeID(tree), memoryVN); break; } JITDUMP(" ==> " FMT_LP " removed, updating dependence to parent " FMT_LP "\n", updateLoopNum, updateParentLoopNum); updateLoopNum = updateParentLoopNum; } // If the update block is not the the header of a loop containing // block, we can also ignore the update. // if (!optLoopContains(updateLoopNum, loopNum)) { JITDUMP(" ==> Not updating loop memory dependence of [%06u]/" FMT_LP ", memory " FMT_VN "/" FMT_LP " is not defined in an enclosing loop\n", dspTreeID(tree), loopNum, memoryVN, updateLoopNum); return; } // If we already have a recorded a loop entry block for this // tree, see if the new update is for a more closely nested // loop. // NodeToLoopMemoryBlockMap* const map = GetNodeToLoopMemoryBlockMap(); BasicBlock* mapBlock = nullptr; if (map->Lookup(tree, &mapBlock)) { unsigned const mapLoopNum = mapBlock->bbNatLoopNum; // If the update loop contains the existing map loop, // the existing map loop is more constraining. So no // update needed. 
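    // For example (hypothetical loop numbers): if the tree is already recorded against inner loop L2 and
    // this update comes from an enclosing loop L1, the existing L2 record is at least as restrictive as
    // an L1 record would be, so nothing needs to change.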
// if (optLoopContains(updateLoopNum, mapLoopNum)) { JITDUMP(" ==> Not updating loop memory dependence of [%06u]; alrady constrained to " FMT_LP " nested in " FMT_LP "\n", dspTreeID(tree), mapLoopNum, updateLoopNum); return; } } // MemoryVN now describes the most constraining loop memory dependence // we know of. Update the map. // JITDUMP(" ==> Updating loop memory dependence of [%06u] to " FMT_LP "\n", dspTreeID(tree), updateLoopNum); map->Set(tree, optLoopTable[updateLoopNum].lpEntry, NodeToLoopMemoryBlockMap::Overwrite); } //------------------------------------------------------------------------ // optCopyLoopMemoryDependence: record that tree's loop memory dependence // is the same as some other tree. // // Arguments: // fromTree -- tree to copy dependence from // toTree -- tree in question // void Compiler::optCopyLoopMemoryDependence(GenTree* fromTree, GenTree* toTree) { NodeToLoopMemoryBlockMap* const map = GetNodeToLoopMemoryBlockMap(); BasicBlock* mapBlock = nullptr; if (map->Lookup(fromTree, &mapBlock)) { map->Set(toTree, mapBlock); } } //------------------------------------------------------------------------ // optHoistLoopBlocks: Hoist invariant expression out of the loop. // // Arguments: // loopNum - The number of the loop // blocks - A stack of blocks belonging to the loop // hoistContext - The loop hoist context // // Assumptions: // The `blocks` stack contains the definitely-executed blocks in // the loop, in the execution order, starting with the loop entry // block on top of the stack. // void Compiler::optHoistLoopBlocks(unsigned loopNum, ArrayStack<BasicBlock*>* blocks, LoopHoistContext* hoistContext) { class HoistVisitor : public GenTreeVisitor<HoistVisitor> { class Value { GenTree* m_node; public: bool m_hoistable; bool m_cctorDependent; bool m_invariant; #ifdef DEBUG const char* m_failReason; #endif Value(GenTree* node) : m_node(node), m_hoistable(false), m_cctorDependent(false), m_invariant(false) { #ifdef DEBUG m_failReason = "unset"; #endif } GenTree* Node() { return m_node; } }; ArrayStack<Value> m_valueStack; bool m_beforeSideEffect; unsigned m_loopNum; LoopHoistContext* m_hoistContext; BasicBlock* m_currentBlock; bool IsNodeHoistable(GenTree* node) { // TODO-CQ: This is a more restrictive version of a check that optIsCSEcandidate already does - it allows // a struct typed node if a class handle can be recovered from it. if (node->TypeGet() == TYP_STRUCT) { return false; } // Tree must be a suitable CSE candidate for us to be able to hoist it. return m_compiler->optIsCSEcandidate(node); } bool IsTreeVNInvariant(GenTree* tree) { ValueNum vn = tree->gtVNPair.GetLiberal(); bool vnIsInvariant = m_compiler->optVNIsLoopInvariant(vn, m_loopNum, &m_hoistContext->m_curLoopVnInvariantCache); // Even though VN is invariant in the loop (say a constant) its value may depend on position // of tree, so for loop hoisting we must also check that any memory read by tree // is also invariant in the loop. // if (vnIsInvariant) { vnIsInvariant = IsTreeLoopMemoryInvariant(tree); } return vnIsInvariant; } //------------------------------------------------------------------------ // IsTreeLoopMemoryInvariant: determine if the value number of tree // is dependent on the tree being executed within the current loop // // Arguments: // tree -- tree in question // // Returns: // true if tree could be evaluated just before loop and get the // same value. // // Note: // Calls are optimistically assumed to be invariant. // Caller must do their own analysis for these tree types. 
// bool IsTreeLoopMemoryInvariant(GenTree* tree) { if (tree->IsCall()) { // Calls are handled specially by hoisting, and loop memory dependence // must be checked by other means. // return true; } NodeToLoopMemoryBlockMap* const map = m_compiler->GetNodeToLoopMemoryBlockMap(); BasicBlock* loopEntryBlock = nullptr; if (map->Lookup(tree, &loopEntryBlock)) { for (MemoryKind memoryKind : allMemoryKinds()) { ValueNum loopMemoryVN = m_compiler->GetMemoryPerSsaData(loopEntryBlock->bbMemorySsaNumIn[memoryKind]) ->m_vnPair.GetLiberal(); if (!m_compiler->optVNIsLoopInvariant(loopMemoryVN, m_loopNum, &m_hoistContext->m_curLoopVnInvariantCache)) { return false; } } } return true; } public: enum { ComputeStack = false, DoPreOrder = true, DoPostOrder = true, DoLclVarsOnly = false, UseExecutionOrder = true, }; HoistVisitor(Compiler* compiler, unsigned loopNum, LoopHoistContext* hoistContext) : GenTreeVisitor(compiler) , m_valueStack(compiler->getAllocator(CMK_LoopHoist)) , m_beforeSideEffect(true) , m_loopNum(loopNum) , m_hoistContext(hoistContext) , m_currentBlock(nullptr) { } void HoistBlock(BasicBlock* block) { m_currentBlock = block; for (Statement* const stmt : block->NonPhiStatements()) { WalkTree(stmt->GetRootNodePointer(), nullptr); Value& top = m_valueStack.TopRef(); assert(top.Node() == stmt->GetRootNode()); if (top.m_hoistable) { m_compiler->optHoistCandidate(stmt->GetRootNode(), block, m_loopNum, m_hoistContext); } else { JITDUMP(" [%06u] not %s: %s\n", dspTreeID(top.Node()), top.m_invariant ? "invariant" : "hoistable", top.m_failReason); } m_valueStack.Reset(); } // Only unconditionally executed blocks in the loop are visited (see optHoistThisLoop) // so after we're done visiting the first block we need to assume the worst, that the // blocks that are not visisted have side effects. m_beforeSideEffect = false; } fgWalkResult PreOrderVisit(GenTree** use, GenTree* user) { GenTree* node = *use; m_valueStack.Emplace(node); return fgWalkResult::WALK_CONTINUE; } fgWalkResult PostOrderVisit(GenTree** use, GenTree* user) { GenTree* tree = *use; if (tree->OperIsLocal()) { GenTreeLclVarCommon* lclVar = tree->AsLclVarCommon(); unsigned lclNum = lclVar->GetLclNum(); // To be invariant a LclVar node must not be the LHS of an assignment ... bool isInvariant = !user->OperIs(GT_ASG) || (user->AsOp()->gtGetOp1() != tree); // and the variable must be in SSA ... isInvariant = isInvariant && m_compiler->lvaInSsa(lclNum) && lclVar->HasSsaName(); // and the SSA definition must be outside the loop we're hoisting from ... isInvariant = isInvariant && !m_compiler->optLoopTable[m_loopNum].lpContains( m_compiler->lvaGetDesc(lclNum)->GetPerSsaData(lclVar->GetSsaNum())->GetBlock()); // and the VN of the tree is considered invariant as well. // // TODO-CQ: This VN invariance check should not be necessary and in some cases it is conservative - it // is possible that the SSA def is outside the loop but VN does not understand what the node is doing // (e.g. LCL_FLD-based type reinterpretation) and assigns a "new, unique VN" to the node. This VN is // associated with the block where the node is, a loop block, and thus the VN is considered to not be // invariant. // On the other hand, it is possible for a SSA def to be inside the loop yet the use to be invariant, // if the defining expression is also invariant. In such a case the VN invariance would help but it is // blocked by the SSA invariance check. 
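                // (E.g. for "t = a + b" with 'a' and 'b' invariant but the assignment placed inside the
                // loop: uses of 't' get an invariant VN, yet they are still rejected here because the
                // SSA def of 't' is inside the loop.)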
isInvariant = isInvariant && IsTreeVNInvariant(tree); Value& top = m_valueStack.TopRef(); assert(top.Node() == tree); if (isInvariant) { top.m_invariant = true; // In general it doesn't make sense to hoist a local node but there are exceptions, for example // LCL_FLD nodes (because then the variable cannot be enregistered and the node always turns // into a memory access). top.m_hoistable = IsNodeHoistable(tree); } #ifdef DEBUG if (!isInvariant) { top.m_failReason = "local, not rvalue / not in SSA / defined within current loop"; } else if (!top.m_hoistable) { top.m_failReason = "not handled by cse"; } #endif return fgWalkResult::WALK_CONTINUE; } // Initclass CLS_VARs and IconHandles are the base cases of cctor dependent trees. // In the IconHandle case, it's of course the dereference, rather than the constant itself, that is // truly dependent on the cctor. So a more precise approach would be to separately propagate // isCctorDependent and isAddressWhoseDereferenceWouldBeCctorDependent, but we don't for // simplicity/throughput; the constant itself would be considered non-hoistable anyway, since // optIsCSEcandidate returns false for constants. bool treeIsCctorDependent = ((tree->OperIs(GT_CLS_VAR) && ((tree->gtFlags & GTF_CLS_VAR_INITCLASS) != 0)) || (tree->OperIs(GT_CNS_INT) && ((tree->gtFlags & GTF_ICON_INITCLASS) != 0))); bool treeIsInvariant = true; bool treeHasHoistableChildren = false; int childCount; #ifdef DEBUG const char* failReason = "unknown"; #endif for (childCount = 0; m_valueStack.TopRef(childCount).Node() != tree; childCount++) { Value& child = m_valueStack.TopRef(childCount); if (child.m_hoistable) { treeHasHoistableChildren = true; } if (!child.m_invariant) { treeIsInvariant = false; INDEBUG(failReason = "variant child";) } if (child.m_cctorDependent) { // Normally, a parent of a cctor-dependent tree is also cctor-dependent. treeIsCctorDependent = true; // Check for the case where we can stop propagating cctor-dependent upwards. if (tree->OperIs(GT_COMMA) && (child.Node() == tree->gtGetOp2())) { GenTree* op1 = tree->gtGetOp1(); if (op1->OperIs(GT_CALL)) { GenTreeCall* call = op1->AsCall(); if ((call->gtCallType == CT_HELPER) && s_helperCallProperties.MayRunCctor(eeGetHelperNum(call->gtCallMethHnd))) { // Hoisting the comma is ok because it would hoist the initialization along // with the static field reference. treeIsCctorDependent = false; // Hoisting the static field without hoisting the initialization would be // incorrect, make sure we consider the field (which we flagged as // cctor-dependent) non-hoistable. noway_assert(!child.m_hoistable); } } } } } // If all the children of "tree" are hoistable, then "tree" itself can be hoisted, // unless it has a static var reference that can't be hoisted past its cctor call. bool treeIsHoistable = treeIsInvariant && !treeIsCctorDependent; #ifdef DEBUG if (treeIsInvariant && !treeIsHoistable) { failReason = "cctor dependent"; } #endif // But we must see if anything else prevents "tree" from being hoisted. // if (treeIsInvariant) { if (treeIsHoistable) { treeIsHoistable = IsNodeHoistable(tree); if (!treeIsHoistable) { INDEBUG(failReason = "not handled by cse";) } } // If it's a call, it must be a helper call, and be pure. // Further, if it may run a cctor, it must be labeled as "Hoistable" // (meaning it won't run a cctor because the class is not precise-init). 
if (treeIsHoistable && tree->IsCall()) { GenTreeCall* call = tree->AsCall(); if (call->gtCallType != CT_HELPER) { INDEBUG(failReason = "non-helper call";) treeIsHoistable = false; } else { CorInfoHelpFunc helpFunc = eeGetHelperNum(call->gtCallMethHnd); if (!s_helperCallProperties.IsPure(helpFunc)) { INDEBUG(failReason = "impure helper call";) treeIsHoistable = false; } else if (s_helperCallProperties.MayRunCctor(helpFunc) && ((call->gtFlags & GTF_CALL_HOISTABLE) == 0)) { INDEBUG(failReason = "non-hoistable helper call";) treeIsHoistable = false; } } } if (treeIsHoistable) { if (!m_beforeSideEffect) { // For now, we give up on an expression that might raise an exception if it is after the // first possible global side effect (and we assume we're after that if we're not in the first // block). // TODO-CQ: this is when we might do loop cloning. // if ((tree->gtFlags & GTF_EXCEPT) != 0) { INDEBUG(failReason = "side effect ordering constraint";) treeIsHoistable = false; } } } // Is the value of the whole tree loop invariant? treeIsInvariant = IsTreeVNInvariant(tree); // Is the value of the whole tree loop invariant? if (!treeIsInvariant) { // Here we have a tree that is not loop invariant and we thus cannot hoist INDEBUG(failReason = "tree VN is loop variant";) treeIsHoistable = false; } } // Next check if we need to set 'm_beforeSideEffect' to false. // // If we have already set it to false then we can skip these checks // if (m_beforeSideEffect) { // Is the value of the whole tree loop invariant? if (!treeIsInvariant) { // We have a tree that is not loop invariant and we thus cannot hoist assert(treeIsHoistable == false); // Check if we should clear m_beforeSideEffect. // If 'tree' can throw an exception then we need to set m_beforeSideEffect to false. // Note that calls are handled below if (tree->OperMayThrow(m_compiler) && !tree->IsCall()) { m_beforeSideEffect = false; } } // In the section below, we only care about memory side effects. We assume that expressions will // be hoisted so that they are evaluated in the same order as they would have been in the loop, // and therefore throw exceptions in the same order. // if (tree->IsCall()) { // If it's a call, it must be a helper call that does not mutate the heap. // Further, if it may run a cctor, it must be labeled as "Hoistable" // (meaning it won't run a cctor because the class is not precise-init). GenTreeCall* call = tree->AsCall(); if (call->gtCallType != CT_HELPER) { m_beforeSideEffect = false; } else { CorInfoHelpFunc helpFunc = eeGetHelperNum(call->gtCallMethHnd); if (s_helperCallProperties.MutatesHeap(helpFunc)) { m_beforeSideEffect = false; } else if (s_helperCallProperties.MayRunCctor(helpFunc) && (call->gtFlags & GTF_CALL_HOISTABLE) == 0) { m_beforeSideEffect = false; } // Additional check for helper calls that throw exceptions if (!treeIsInvariant) { // We have a tree that is not loop invariant and we thus cannot hoist assert(treeIsHoistable == false); // Does this helper call throw? if (!s_helperCallProperties.NoThrow(helpFunc)) { m_beforeSideEffect = false; } } } } else if (tree->OperIs(GT_ASG)) { // If the LHS of the assignment has a global reference, then assume it's a global side effect. 
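                        // (GTF_GLOB_REF on the LHS roughly means the store may write global memory - a
                        // static field, an indirection, or an address-exposed local - rather than an
                        // ordinary local.)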
GenTree* lhs = tree->AsOp()->gtOp1; if (lhs->gtFlags & GTF_GLOB_REF) { m_beforeSideEffect = false; } } else if (tree->OperIs(GT_XADD, GT_XORR, GT_XAND, GT_XCHG, GT_LOCKADD, GT_CMPXCHG, GT_MEMORYBARRIER)) { // If this node is a MEMORYBARRIER or an Atomic operation // then don't hoist and stop any further hoisting after this node INDEBUG(failReason = "atomic op or memory barrier";) treeIsHoistable = false; m_beforeSideEffect = false; } } // If this 'tree' is hoistable then we return and the caller will // decide to hoist it as part of larger hoistable expression. // if (!treeIsHoistable && treeHasHoistableChildren) { // The current tree is not hoistable but it has hoistable children that we need // to hoist now. // // In order to preserve the original execution order, we also need to hoist any // other hoistable trees that we encountered so far. // At this point the stack contains (in top to bottom order): // - the current node's children // - the current node // - ancestors of the current node and some of their descendants // // The ancestors have not been visited yet in post order so they're not hoistable // (and they cannot become hoistable because the current node is not) but some of // their descendants may have already been traversed and be hoistable. // // The execution order is actually bottom to top so we'll start hoisting from // the bottom of the stack, skipping the current node (which is expected to not // be hoistable). // // Note that the treeHasHoistableChildren check avoids unnecessary stack traversing // and also prevents hoisting trees too early. If the current tree is not hoistable // and it doesn't have any hoistable children then there's no point in hoisting any // other trees. Doing so would interfere with the cctor dependent case, where the // cctor dependent node is initially not hoistable and may become hoistable later, // when its parent comma node is visited. // for (int i = 0; i < m_valueStack.Height(); i++) { Value& value = m_valueStack.BottomRef(i); if (value.m_hoistable) { assert(value.Node() != tree); // Don't hoist this tree again. value.m_hoistable = false; value.m_invariant = false; m_compiler->optHoistCandidate(value.Node(), m_currentBlock, m_loopNum, m_hoistContext); } else if (value.Node() != tree) { JITDUMP(" [%06u] not %s: %s\n", dspTreeID(value.Node()), value.m_invariant ? "invariant" : "hoistable", value.m_failReason); } } } m_valueStack.Pop(childCount); Value& top = m_valueStack.TopRef(); assert(top.Node() == tree); top.m_hoistable = treeIsHoistable; top.m_cctorDependent = treeIsCctorDependent; top.m_invariant = treeIsInvariant; #ifdef DEBUG if (!top.m_invariant || !top.m_hoistable) { top.m_failReason = failReason; } #endif return fgWalkResult::WALK_CONTINUE; } }; LoopDsc* loopDsc = &optLoopTable[loopNum]; assert(blocks->Top() == loopDsc->lpEntry); HoistVisitor visitor(this, loopNum, hoistContext); while (!blocks->Empty()) { BasicBlock* block = blocks->Pop(); weight_t blockWeight = block->getBBWeight(this); JITDUMP("\n optHoistLoopBlocks " FMT_BB " (weight=%6s) of loop " FMT_LP " <" FMT_BB ".." 
FMT_BB ">\n", block->bbNum, refCntWtd2str(blockWeight), loopNum, loopDsc->lpTop->bbNum, loopDsc->lpBottom->bbNum); if (blockWeight < (BB_UNITY_WEIGHT / 10)) { JITDUMP(" block weight is too small to perform hoisting.\n"); continue; } visitor.HoistBlock(block); } } void Compiler::optHoistCandidate(GenTree* tree, BasicBlock* treeBb, unsigned lnum, LoopHoistContext* hoistCtxt) { assert(lnum != BasicBlock::NOT_IN_LOOP); // It must pass the hoistable profitablity tests for this loop level if (!optIsProfitableToHoistTree(tree, lnum)) { JITDUMP(" ... not profitable to hoist\n"); return; } if (hoistCtxt->m_hoistedInParentLoops.Lookup(tree->gtVNPair.GetLiberal())) { JITDUMP(" ... already hoisted same VN in parent\n"); // already hoisted in a parent loop, so don't hoist this expression. return; } if (hoistCtxt->GetHoistedInCurLoop(this)->Lookup(tree->gtVNPair.GetLiberal())) { JITDUMP(" ... already hoisted same VN in current\n"); // already hoisted this expression in the current loop, so don't hoist this expression. return; } // Create a loop pre-header in which to put the hoisted code. fgCreateLoopPreHeader(lnum); // If the block we're hoisting from and the pre-header are in different EH regions, don't hoist. // TODO: we could probably hoist things that won't raise exceptions, such as constants. if (!BasicBlock::sameTryRegion(optLoopTable[lnum].lpHead, treeBb)) { JITDUMP(" ... not hoisting in " FMT_LP ", eh region constraint (pre-header try index %d, candidate " FMT_BB " try index %d\n", lnum, optLoopTable[lnum].lpHead->bbTryIndex, treeBb->bbNum, treeBb->bbTryIndex); return; } // Expression can be hoisted optPerformHoistExpr(tree, treeBb, lnum); // Increment lpHoistedExprCount or lpHoistedFPExprCount if (!varTypeIsFloating(tree->TypeGet())) { optLoopTable[lnum].lpHoistedExprCount++; #ifndef TARGET_64BIT // For our 32-bit targets Long types take two registers. if (varTypeIsLong(tree->TypeGet())) { optLoopTable[lnum].lpHoistedExprCount++; } #endif } else // Floating point expr hoisted { optLoopTable[lnum].lpHoistedFPExprCount++; } // Record the hoisted expression in hoistCtxt hoistCtxt->GetHoistedInCurLoop(this)->Set(tree->gtVNPair.GetLiberal(), true); } bool Compiler::optVNIsLoopInvariant(ValueNum vn, unsigned lnum, VNSet* loopVnInvariantCache) { // If it is not a VN, is not loop-invariant. if (vn == ValueNumStore::NoVN) { return false; } // We'll always short-circuit constants. if (vnStore->IsVNConstant(vn) || vn == vnStore->VNForVoid()) { return true; } // If we've done this query previously, don't repeat. bool previousRes = false; if (loopVnInvariantCache->Lookup(vn, &previousRes)) { return previousRes; } bool res = true; VNFuncApp funcApp; if (vnStore->GetVNFunc(vn, &funcApp)) { if (funcApp.m_func == VNF_PhiDef) { // Is the definition within the loop? If so, is not loop-invariant. unsigned lclNum = funcApp.m_args[0]; unsigned ssaNum = funcApp.m_args[1]; LclSsaVarDsc* ssaDef = lvaTable[lclNum].GetPerSsaData(ssaNum); res = !optLoopContains(lnum, ssaDef->GetBlock()->bbNatLoopNum); } else if (funcApp.m_func == VNF_PhiMemoryDef) { BasicBlock* defnBlk = reinterpret_cast<BasicBlock*>(vnStore->ConstantValue<ssize_t>(funcApp.m_args[0])); res = !optLoopContains(lnum, defnBlk->bbNatLoopNum); } else if (funcApp.m_func == VNF_MemOpaque) { const unsigned vnLoopNum = funcApp.m_args[0]; // Check for the special "ambiguous" loop MemOpaque VN. // This is considered variant in every loop. 
// if (vnLoopNum == BasicBlock::MAX_LOOP_NUM) { res = false; } else { res = !optLoopContains(lnum, vnLoopNum); } } else { for (unsigned i = 0; i < funcApp.m_arity; i++) { // 4th arg of mapStore identifies the loop where the store happens. // if (funcApp.m_func == VNF_MapStore) { assert(funcApp.m_arity == 4); if (i == 3) { const unsigned vnLoopNum = funcApp.m_args[3]; res = !optLoopContains(lnum, vnLoopNum); break; } } // TODO-CQ: We need to either make sure that *all* VN functions // always take VN args, or else have a list of arg positions to exempt, as implicitly // constant. if (!optVNIsLoopInvariant(funcApp.m_args[i], lnum, loopVnInvariantCache)) { res = false; break; } } } } loopVnInvariantCache->Set(vn, res); return res; } //------------------------------------------------------------------------------ // fgCreateLoopPreHeader: Creates a pre-header block for the given loop. // A pre-header is a block outside the loop that falls through or branches to the loop // entry block. It is the only non-loop predecessor block to the entry block (thus, it // dominates the entry block). The pre-header replaces the current lpHead in the loop table. // The pre-header will be placed immediately before the loop top block, which is the first // block of the loop in program order. // // Once a loop has a pre-header, calling this function will immediately return without // creating another. // // If there already exists a block that meets the pre-header requirements, that block is marked // as a pre-header, and no flow graph modification is made. // // Note that the pre-header block can be in a different EH region from blocks in the loop, including the // entry block. Code doing hoisting is required to check the EH legality of hoisting to the pre-header // before doing so. // // Since the flow graph has changed, if needed, fgUpdateChangedFlowGraph() should be called after this // to update the block numbers, reachability, and dominators. The loop table does not need to be rebuilt. // The new pre-header block does have a copy of the previous 'head' reachability set, but the pre-header // itself doesn't exist in any reachability/dominator sets. `fgDominate` has code to specifically // handle queries about the pre-header dominating other blocks, even without re-computing dominators. // The preds lists have been maintained. // // Currently, if you create a pre-header but don't put any code in it, any subsequent fgUpdateFlowGraph() // pass might choose to compact the empty pre-header with a predecessor block. That is, a pre-header // block might disappear if not used. // // The code does not depend on the order of the BasicBlock bbNum. // // Arguments: // lnum - loop index // void Compiler::fgCreateLoopPreHeader(unsigned lnum) { #ifdef DEBUG if (verbose) { printf("*************** In fgCreateLoopPreHeader for " FMT_LP "\n", lnum); } #endif // DEBUG LoopDsc& loop = optLoopTable[lnum]; // Have we already created a loop-preheader block? if (loop.lpFlags & LPFLG_HAS_PREHEAD) { JITDUMP(" pre-header already exists\n"); INDEBUG(loop.lpValidatePreHeader()); return; } BasicBlock* head = loop.lpHead; BasicBlock* top = loop.lpTop; BasicBlock* entry = loop.lpEntry; // Ensure that lpHead always dominates lpEntry noway_assert(fgDominate(head, entry)); // If `head` is already a valid pre-header, then mark it so. if (head->GetUniqueSucc() == entry) { // The loop entry must have a single non-loop predecessor, which is the pre-header. 
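        // That is, apart from 'head' itself, every predecessor of 'entry' must come from inside the loop
        // (a back edge); any other out-of-loop predecessor disqualifies 'head' from acting as the
        // pre-header.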
bool loopHasProperEntryBlockPreds = true; for (BasicBlock* const predBlock : entry->PredBlocks()) { if (head == predBlock) { continue; } const bool intraLoopPred = optLoopContains(lnum, predBlock->bbNatLoopNum); if (!intraLoopPred) { loopHasProperEntryBlockPreds = false; break; } } if (loopHasProperEntryBlockPreds) { // Does this existing region have the same EH region index that we will use when we create the pre-header? // If not, we want to create a new pre-header with the expected region. bool headHasCorrectEHRegion = false; if ((top->bbFlags & BBF_TRY_BEG) != 0) { assert(top->hasTryIndex()); unsigned newTryIndex = ehTrueEnclosingTryIndexIL(top->getTryIndex()); unsigned compareTryIndex = head->hasTryIndex() ? head->getTryIndex() : EHblkDsc::NO_ENCLOSING_INDEX; headHasCorrectEHRegion = newTryIndex == compareTryIndex; } else { headHasCorrectEHRegion = BasicBlock::sameTryRegion(head, top); } if (headHasCorrectEHRegion) { JITDUMP(" converting existing header " FMT_BB " into pre-header\n", head->bbNum); loop.lpFlags |= LPFLG_HAS_PREHEAD; assert((head->bbFlags & BBF_LOOP_PREHEADER) == 0); // It isn't already a loop pre-header head->bbFlags |= BBF_LOOP_PREHEADER; INDEBUG(loop.lpValidatePreHeader()); INDEBUG(fgDebugCheckLoopTable()); return; } else { JITDUMP(" existing head " FMT_BB " doesn't have correct EH region\n", head->bbNum); } } else { JITDUMP(" existing head " FMT_BB " isn't unique non-loop predecessor of loop entry\n", head->bbNum); } } else { JITDUMP(" existing head " FMT_BB " doesn't have unique successor branching to loop entry\n", head->bbNum); } // Allocate a new basic block for the pre-header. const bool isTopEntryLoop = loop.lpIsTopEntry(); BasicBlock* preHead = bbNewBasicBlock(isTopEntryLoop ? BBJ_NONE : BBJ_ALWAYS); preHead->bbFlags |= BBF_INTERNAL | BBF_LOOP_PREHEADER; if (!isTopEntryLoop) { preHead->bbJumpDest = entry; } // Must set IL code offset preHead->bbCodeOffs = top->bbCodeOffs; // Set the default value of the preHead weight in case we don't have // valid profile data and since this blocks weight is just an estimate // we clear any BBF_PROF_WEIGHT flag that we may have picked up from head. // preHead->inheritWeight(head); preHead->bbFlags &= ~BBF_PROF_WEIGHT; // Copy the bbReach set from head for the new preHead block preHead->bbReach = BlockSetOps::MakeEmpty(this); BlockSetOps::Assign(this, preHead->bbReach, head->bbReach); // Also include 'head' in the preHead bbReach set BlockSetOps::AddElemD(this, preHead->bbReach, head->bbNum); #ifdef DEBUG if (verbose) { printf("\nCreated PreHeader (" FMT_BB ") for loop " FMT_LP " (" FMT_BB " - " FMT_BB, preHead->bbNum, lnum, top->bbNum, loop.lpBottom->bbNum); if (!isTopEntryLoop) { printf(", entry " FMT_BB, entry->bbNum); } printf("), with weight = %s\n", refCntWtd2str(preHead->getBBWeight(this))); } #endif // The preheader block is part of the containing loop (if any). preHead->bbNatLoopNum = loop.lpParent; if (fgIsUsingProfileWeights() && (head->bbJumpKind == BBJ_COND)) { if ((head->bbWeight == BB_ZERO_WEIGHT) || (entry->bbWeight == BB_ZERO_WEIGHT)) { preHead->bbWeight = BB_ZERO_WEIGHT; preHead->bbFlags |= BBF_RUN_RARELY; } else { // Allow for either the fall-through or branch to target 'entry'. 
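            // Roughly, the computation below (a sketch, assuming valid profile data):
            //
            //          head (BBJ_COND)
            //         /               \
            //      entry          skipLoopBlock
            //
            //   loopTakenRatio = loopEnteredCount / (loopEnteredCount + loopSkippedCount)
            //   preHead weight = head->bbWeight * loopTakenRatio
            //
            // using edge weights when they are valid and show some flow entering the loop, and falling
            // back to the successor block weights otherwise.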
BasicBlock* skipLoopBlock; if (head->bbNext == entry) { skipLoopBlock = head->bbJumpDest; } else { skipLoopBlock = head->bbNext; } assert(skipLoopBlock != entry); bool allValidProfileWeights = (head->hasProfileWeight() && skipLoopBlock->hasProfileWeight() && entry->hasProfileWeight()); if (allValidProfileWeights) { weight_t loopEnteredCount = 0; weight_t loopSkippedCount = 0; bool useEdgeWeights = fgHaveValidEdgeWeights; if (useEdgeWeights) { const flowList* edgeToEntry = fgGetPredForBlock(entry, head); const flowList* edgeToSkipLoop = fgGetPredForBlock(skipLoopBlock, head); noway_assert(edgeToEntry != nullptr); noway_assert(edgeToSkipLoop != nullptr); loopEnteredCount = (edgeToEntry->edgeWeightMin() + edgeToEntry->edgeWeightMax()) / 2.0; loopSkippedCount = (edgeToSkipLoop->edgeWeightMin() + edgeToSkipLoop->edgeWeightMax()) / 2.0; // Watch out for cases where edge weights were not properly maintained // so that it appears no profile flow enters the loop. // useEdgeWeights = !fgProfileWeightsConsistent(loopEnteredCount, BB_ZERO_WEIGHT); } if (!useEdgeWeights) { loopEnteredCount = entry->bbWeight; loopSkippedCount = skipLoopBlock->bbWeight; } weight_t loopTakenRatio = loopEnteredCount / (loopEnteredCount + loopSkippedCount); JITDUMP("%s edge weights; loopEnterCount " FMT_WT " loopSkipCount " FMT_WT " taken ratio " FMT_WT "\n", fgHaveValidEdgeWeights ? (useEdgeWeights ? "valid" : "ignored") : "invalid", loopEnteredCount, loopSkippedCount, loopTakenRatio); // Calculate a good approximation of the preHead's block weight weight_t preHeadWeight = (head->bbWeight * loopTakenRatio); preHead->setBBProfileWeight(preHeadWeight); noway_assert(!preHead->isRunRarely()); } } } // Link in the preHead block fgInsertBBbefore(top, preHead); // Ideally we would re-run SSA and VN if we optimized by doing loop hoisting. // However, that is too expensive at this point. Instead, we update the phi // node block references, if we created pre-header block due to hoisting. // This is sufficient because any definition participating in SSA that flowed // into the phi via the loop header block will now flow through the preheader // block from the header block. for (Statement* const stmt : top->Statements()) { GenTree* tree = stmt->GetRootNode(); if (tree->OperGet() != GT_ASG) { break; } GenTree* op2 = tree->gtGetOp2(); if (op2->OperGet() != GT_PHI) { break; } for (GenTreePhi::Use& use : op2->AsPhi()->Uses()) { GenTreePhiArg* phiArg = use.GetNode()->AsPhiArg(); if (phiArg->gtPredBB == head) { phiArg->gtPredBB = preHead; } } } // In which EH region should the pre-header live? // // The pre-header block is added immediately before `top`. // // The `top` block cannot be the first block of a filter or handler: `top` must have a back-edge from a // BBJ_COND or BBJ_ALWAYS within the loop, and a filter or handler cannot be branched to like that. // // The `top` block can be the first block of a `try` region, and you can fall into or branch to the // first block of a `try` region. (For top-entry loops, `top` will both be the target of a back-edge // and a fall-through from the previous block.) // // If the `top` block is NOT the first block of a `try` region, the pre-header can simply extend the // `top` block region. // // If the `top` block IS the first block of a `try`, we find its parent region and use that. For mutual-protect // regions, we need to find the actual parent, as the block stores the most "nested" mutual region. 
For // non-mutual-protect regions, due to EH canonicalization, we are guaranteed that no other EH regions begin // on the same block, so looking to just the parent is sufficient. Note that we can't just extend the EH // region of `top` to the pre-header, because `top` will still be the target of backward branches from // within the loop. If those backward branches come from outside the `try` (say, only the top half of the loop // is a `try` region), then we can't branch to a non-first `try` region block (you always must entry the `try` // in the first block). // // Note that hoisting any code out of a try region, for example, to a pre-header block in a different // EH region, needs to ensure that no exceptions will be thrown. assert(!fgIsFirstBlockOfFilterOrHandler(top)); if ((top->bbFlags & BBF_TRY_BEG) != 0) { // `top` is the beginning of a try block. Figure out the EH region to use. assert(top->hasTryIndex()); unsigned short newTryIndex = (unsigned short)ehTrueEnclosingTryIndexIL(top->getTryIndex()); if (newTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) { // No EH try index. preHead->clearTryIndex(); } else { preHead->setTryIndex(newTryIndex); } // What handler region to use? Use the same handler region as `top`. preHead->copyHndIndex(top); } else { // `top` is not the beginning of a try block. Just extend the EH region to the pre-header. // We don't need to call `fgExtendEHRegionBefore()` because all the special handling that function // does it to account for `top` being the first block of a `try` or handler region, which we know // is not true. preHead->copyEHRegion(top); } // TODO-CQ: set dominators for this block, to allow loop optimizations requiring them // (e.g: hoisting expression in a loop with the same 'head' as this one) // Update the loop table loop.lpHead = preHead; loop.lpFlags |= LPFLG_HAS_PREHEAD; // The new block becomes the 'head' of the loop - update bbRefs and bbPreds. // All non-loop predecessors of 'entry' now jump to 'preHead'. preHead->bbRefs = 0; bool checkNestedLoops = false; for (BasicBlock* const predBlock : entry->PredBlocks()) { // Is the predBlock in the loop? // // We want to use: // const bool intraLoopPred = loop.lpContains(predBlock); // but we can't depend on the bbNum ordering. // // Previously, this code wouldn't redirect predecessors dominated by the entry. However, that can // lead to a case where non-loop predecessor is dominated by the loop entry, and that predecessor // continues to branch to the entry, not the new pre-header. This is normally ok for hoisting // because it will introduce an SSA PHI def within the loop, which will inhibit hoisting. However, // it complicates the definition of what a pre-header is. const bool intraLoopPred = optLoopContains(lnum, predBlock->bbNatLoopNum); if (intraLoopPred) { if (predBlock != loop.lpBottom) { checkNestedLoops = true; } continue; } switch (predBlock->bbJumpKind) { case BBJ_NONE: // This 'entry' predecessor that isn't dominated by 'entry' must be outside the loop, // meaning it must be fall-through to 'entry', and we must have a top-entry loop. 
noway_assert((entry == top) && (predBlock == head) && (predBlock->bbNext == preHead)); fgRemoveRefPred(entry, predBlock); fgAddRefPred(preHead, predBlock); break; case BBJ_COND: if (predBlock->bbJumpDest == entry) { predBlock->bbJumpDest = preHead; noway_assert(predBlock->bbNext != preHead); } else { noway_assert((entry == top) && (predBlock == head) && (predBlock->bbNext == preHead)); } fgRemoveRefPred(entry, predBlock); fgAddRefPred(preHead, predBlock); break; case BBJ_ALWAYS: case BBJ_EHCATCHRET: noway_assert(predBlock->bbJumpDest == entry); predBlock->bbJumpDest = preHead; fgRemoveRefPred(entry, predBlock); fgAddRefPred(preHead, predBlock); break; case BBJ_SWITCH: unsigned jumpCnt; jumpCnt = predBlock->bbJumpSwt->bbsCount; BasicBlock** jumpTab; jumpTab = predBlock->bbJumpSwt->bbsDstTab; do { assert(*jumpTab); if ((*jumpTab) == entry) { (*jumpTab) = preHead; fgRemoveRefPred(entry, predBlock); fgAddRefPred(preHead, predBlock); } } while (++jumpTab, --jumpCnt); UpdateSwitchTableTarget(predBlock, entry, preHead); break; default: noway_assert(!"Unexpected bbJumpKind"); break; } } flowList* const edgeToPreHeader = fgGetPredForBlock(preHead, head); noway_assert(edgeToPreHeader != nullptr); edgeToPreHeader->setEdgeWeights(preHead->bbWeight, preHead->bbWeight, preHead); noway_assert(fgGetPredForBlock(entry, preHead) == nullptr); flowList* const edgeFromPreHeader = fgAddRefPred(entry, preHead); edgeFromPreHeader->setEdgeWeights(preHead->bbWeight, preHead->bbWeight, entry); /* If we found at least one back-edge in the flowgraph pointing to the entry of the loop (other than the back-edge of the loop we are considering) then we likely have nested do-while loops with the same entry block and inserting the preheader block changes the head of all the nested loops. Now we will update this piece of information in the loop table, and mark all nested loops as having a preheader (the preheader block can be shared among all nested do-while loops with the same entry block). */ if (checkNestedLoops) { for (unsigned l = 0; l < optLoopCount; l++) { if (optLoopTable[l].lpHead == head) { // loop.lpHead was already changed from 'head' to 'preHead' noway_assert(l != lnum); // If it shares head, it must be a top-entry loop that shares top. noway_assert(optLoopTable[l].lpEntry == top); optUpdateLoopHead(l, optLoopTable[l].lpHead, preHead); optLoopTable[l].lpFlags |= LPFLG_HAS_PREHEAD; #ifdef DEBUG if (verbose) { printf("Same PreHeader (" FMT_BB ") can be used for loop " FMT_LP " (" FMT_BB " - " FMT_BB ")\n\n", preHead->bbNum, l, top->bbNum, optLoopTable[l].lpBottom->bbNum); } #endif } } } // We added a new block and altered the preds list; make sure the flow graph has been marked as being modified. 
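    // (The pred-list and block-list updates above should already have set that flag as a side effect;
    // the assert below merely double-checks it rather than setting it.)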
assert(fgModified); #ifdef DEBUG fgDebugCheckBBlist(); fgVerifyHandlerTab(); fgDebugCheckLoopTable(); if (verbose) { JITDUMP("*************** After fgCreateLoopPreHeader for " FMT_LP "\n", lnum); fgDispBasicBlocks(); fgDispHandlerTab(); optPrintLoopTable(); } #endif } bool Compiler::optBlockIsLoopEntry(BasicBlock* blk, unsigned* pLnum) { for (unsigned lnum = blk->bbNatLoopNum; lnum != BasicBlock::NOT_IN_LOOP; lnum = optLoopTable[lnum].lpParent) { if (optLoopTable[lnum].lpFlags & LPFLG_REMOVED) { continue; } if (optLoopTable[lnum].lpEntry == blk) { *pLnum = lnum; return true; } } return false; } void Compiler::optComputeLoopSideEffects() { unsigned lnum; for (lnum = 0; lnum < optLoopCount; lnum++) { VarSetOps::AssignNoCopy(this, optLoopTable[lnum].lpVarInOut, VarSetOps::MakeEmpty(this)); VarSetOps::AssignNoCopy(this, optLoopTable[lnum].lpVarUseDef, VarSetOps::MakeEmpty(this)); optLoopTable[lnum].lpFlags &= ~LPFLG_CONTAINS_CALL; } for (lnum = 0; lnum < optLoopCount; lnum++) { if (optLoopTable[lnum].lpFlags & LPFLG_REMOVED) { continue; } if (optLoopTable[lnum].lpParent == BasicBlock::NOT_IN_LOOP) { // Is outermost... optComputeLoopNestSideEffects(lnum); } } VarSetOps::AssignNoCopy(this, lvaFloatVars, VarSetOps::MakeEmpty(this)); #ifndef TARGET_64BIT VarSetOps::AssignNoCopy(this, lvaLongVars, VarSetOps::MakeEmpty(this)); #endif for (unsigned i = 0; i < lvaCount; i++) { LclVarDsc* varDsc = lvaGetDesc(i); if (varDsc->lvTracked) { if (varTypeIsFloating(varDsc->lvType)) { VarSetOps::AddElemD(this, lvaFloatVars, varDsc->lvVarIndex); } #ifndef TARGET_64BIT else if (varTypeIsLong(varDsc->lvType)) { VarSetOps::AddElemD(this, lvaLongVars, varDsc->lvVarIndex); } #endif } } } void Compiler::optComputeLoopNestSideEffects(unsigned lnum) { JITDUMP("optComputeLoopNestSideEffects for " FMT_LP "\n", lnum); assert(optLoopTable[lnum].lpParent == BasicBlock::NOT_IN_LOOP); // Requires: lnum is outermost. for (BasicBlock* const bbInLoop : optLoopTable[lnum].LoopBlocks()) { if (!optComputeLoopSideEffectsOfBlock(bbInLoop)) { // When optComputeLoopSideEffectsOfBlock returns false, we encountered // a block that was moved into the loop range (by fgReorderBlocks), // but not marked correctly as being inside the loop. // We conservatively mark this loop (and any outer loops) // as having memory havoc side effects. // // Record that all loops containing this block have memory havoc effects. // optRecordLoopNestsMemoryHavoc(lnum, fullMemoryKindSet); // All done, no need to keep visiting more blocks break; } } } void Compiler::optRecordLoopNestsMemoryHavoc(unsigned lnum, MemoryKindSet memoryHavoc) { // We should start out with 'lnum' set to a valid natural loop index assert(lnum != BasicBlock::NOT_IN_LOOP); while (lnum != BasicBlock::NOT_IN_LOOP) { for (MemoryKind memoryKind : allMemoryKinds()) { if ((memoryHavoc & memoryKindSet(memoryKind)) != 0) { optLoopTable[lnum].lpLoopHasMemoryHavoc[memoryKind] = true; } } // Move lnum to the next outtermost loop that we need to mark lnum = optLoopTable[lnum].lpParent; } } bool Compiler::optComputeLoopSideEffectsOfBlock(BasicBlock* blk) { unsigned mostNestedLoop = blk->bbNatLoopNum; JITDUMP("optComputeLoopSideEffectsOfBlock " FMT_BB ", mostNestedLoop %d\n", blk->bbNum, mostNestedLoop); if (mostNestedLoop == BasicBlock::NOT_IN_LOOP) { return false; } AddVariableLivenessAllContainingLoops(mostNestedLoop, blk); // MemoryKinds for which an in-loop call or store has arbitrary effects. 
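    // (This set only grows while the statements below are scanned; once it covers every memory kind the
    // detailed analysis stops and the walk merely keeps looking for calls, since "contains a call" is
    // tracked separately via LPFLG_CONTAINS_CALL.)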
MemoryKindSet memoryHavoc = emptyMemoryKindSet; // Now iterate over the remaining statements, and their trees. for (Statement* const stmt : blk->NonPhiStatements()) { for (GenTree* const tree : stmt->TreeList()) { genTreeOps oper = tree->OperGet(); // Even after we set memoryHavoc we still may want to know if a loop contains calls if (memoryHavoc == fullMemoryKindSet) { if (oper == GT_CALL) { // Record that this loop contains a call AddContainsCallAllContainingLoops(mostNestedLoop); } // If we just set LPFLG_CONTAINS_CALL or it was previously set if (optLoopTable[mostNestedLoop].lpFlags & LPFLG_CONTAINS_CALL) { // We can early exit after both memoryHavoc and LPFLG_CONTAINS_CALL are both set to true. break; } // We are just looking for GT_CALL nodes after memoryHavoc was set. continue; } // otherwise memoryHavoc is not set for at least one heap ID assert(memoryHavoc != fullMemoryKindSet); // This body is a distillation of the memory side-effect code of value numbering. // We also do a very limited analysis if byref PtrTo values, to cover some cases // that the compiler creates. if (oper == GT_ASG) { GenTree* lhs = tree->AsOp()->gtOp1->gtEffectiveVal(/*commaOnly*/ true); if (lhs->OperGet() == GT_IND) { GenTree* arg = lhs->AsOp()->gtOp1->gtEffectiveVal(/*commaOnly*/ true); FieldSeqNode* fldSeqArrElem = nullptr; if ((tree->gtFlags & GTF_IND_VOLATILE) != 0) { memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed); continue; } ArrayInfo arrInfo; if (arg->TypeGet() == TYP_BYREF && arg->OperGet() == GT_LCL_VAR) { // If it's a local byref for which we recorded a value number, use that... GenTreeLclVar* argLcl = arg->AsLclVar(); if (lvaInSsa(argLcl->GetLclNum()) && argLcl->HasSsaName()) { ValueNum argVN = lvaTable[argLcl->GetLclNum()].GetPerSsaData(argLcl->GetSsaNum())->m_vnPair.GetLiberal(); VNFuncApp funcApp; if (argVN != ValueNumStore::NoVN && vnStore->GetVNFunc(argVN, &funcApp) && funcApp.m_func == VNF_PtrToArrElem) { assert(vnStore->IsVNHandle(funcApp.m_args[0])); CORINFO_CLASS_HANDLE elemType = CORINFO_CLASS_HANDLE(vnStore->ConstantValue<size_t>(funcApp.m_args[0])); AddModifiedElemTypeAllContainingLoops(mostNestedLoop, elemType); // Don't set memoryHavoc for GcHeap below. Do set memoryHavoc for ByrefExposed // (conservatively assuming that a byref may alias the array element) memoryHavoc |= memoryKindSet(ByrefExposed); continue; } } // Otherwise... memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed); } // Is the LHS an array index expression? else if (lhs->ParseArrayElemForm(this, &arrInfo, &fldSeqArrElem)) { // We actually ignore "fldSeq" -- any modification to an S[], at any // field of "S", will lose all information about the array type. CORINFO_CLASS_HANDLE elemTypeEq = EncodeElemType(arrInfo.m_elemType, arrInfo.m_elemStructType); AddModifiedElemTypeAllContainingLoops(mostNestedLoop, elemTypeEq); // Conservatively assume byrefs may alias this array element memoryHavoc |= memoryKindSet(ByrefExposed); } else { GenTree* baseAddr = nullptr; FieldSeqNode* fldSeq = nullptr; if (arg->IsFieldAddr(this, &baseAddr, &fldSeq)) { assert((fldSeq != nullptr) && (fldSeq != FieldSeqStore::NotAField()) && !fldSeq->IsPseudoField()); FieldKindForVN fieldKind = (baseAddr != nullptr) ? FieldKindForVN::WithBaseAddr : FieldKindForVN::SimpleStatic; AddModifiedFieldAllContainingLoops(mostNestedLoop, fldSeq->GetFieldHandle(), fieldKind); // Conservatively assume byrefs may alias this object. 
memoryHavoc |= memoryKindSet(ByrefExposed); } else { memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed); } } } else if (lhs->OperIsBlk()) { GenTreeLclVarCommon* lclVarTree; bool isEntire; if (!tree->DefinesLocal(this, &lclVarTree, &isEntire)) { // For now, assume arbitrary side effects on GcHeap/ByrefExposed... memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed); } else if (lvaVarAddrExposed(lclVarTree->GetLclNum())) { memoryHavoc |= memoryKindSet(ByrefExposed); } } else if (lhs->OperGet() == GT_CLS_VAR) { AddModifiedFieldAllContainingLoops(mostNestedLoop, lhs->AsClsVar()->gtClsVarHnd, FieldKindForVN::SimpleStatic); // Conservatively assume byrefs may alias this static field memoryHavoc |= memoryKindSet(ByrefExposed); } // Otherwise, must be local lhs form. I should assert that. else if (lhs->OperGet() == GT_LCL_VAR) { GenTreeLclVar* lhsLcl = lhs->AsLclVar(); GenTree* rhs = tree->AsOp()->gtOp2; ValueNum rhsVN = rhs->gtVNPair.GetLiberal(); // If we gave the RHS a value number, propagate it. if (rhsVN != ValueNumStore::NoVN) { rhsVN = vnStore->VNNormalValue(rhsVN); if (lvaInSsa(lhsLcl->GetLclNum()) && lhsLcl->HasSsaName()) { lvaTable[lhsLcl->GetLclNum()] .GetPerSsaData(lhsLcl->GetSsaNum()) ->m_vnPair.SetLiberal(rhsVN); } } // If the local is address-exposed, count this as ByrefExposed havoc if (lvaVarAddrExposed(lhsLcl->GetLclNum())) { memoryHavoc |= memoryKindSet(ByrefExposed); } } } else // if (oper != GT_ASG) { switch (oper) { case GT_COMMA: tree->gtVNPair = tree->AsOp()->gtOp2->gtVNPair; break; case GT_ADDR: // Is it an addr of a array index expression? { GenTree* addrArg = tree->AsOp()->gtOp1; if (addrArg->OperGet() == GT_IND) { // Is the LHS an array index expression? if (addrArg->gtFlags & GTF_IND_ARR_INDEX) { ArrayInfo arrInfo; bool b = GetArrayInfoMap()->Lookup(addrArg, &arrInfo); assert(b); CORINFO_CLASS_HANDLE elemTypeEq = EncodeElemType(arrInfo.m_elemType, arrInfo.m_elemStructType); ValueNum elemTypeEqVN = vnStore->VNForHandle(ssize_t(elemTypeEq), GTF_ICON_CLASS_HDL); ValueNum ptrToArrElemVN = vnStore->VNForFunc(TYP_BYREF, VNF_PtrToArrElem, elemTypeEqVN, // The rest are dummy arguments. vnStore->VNForNull(), vnStore->VNForNull(), vnStore->VNForNull()); tree->gtVNPair.SetBoth(ptrToArrElemVN); } } } break; #ifdef FEATURE_HW_INTRINSICS case GT_HWINTRINSIC: if (tree->AsHWIntrinsic()->OperIsMemoryStore()) { memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed); } break; #endif // FEATURE_HW_INTRINSICS case GT_LOCKADD: case GT_XORR: case GT_XAND: case GT_XADD: case GT_XCHG: case GT_CMPXCHG: case GT_MEMORYBARRIER: case GT_STORE_DYN_BLK: { memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed); } break; case GT_CALL: { GenTreeCall* call = tree->AsCall(); // Record that this loop contains a call AddContainsCallAllContainingLoops(mostNestedLoop); if (call->gtCallType == CT_HELPER) { CorInfoHelpFunc helpFunc = eeGetHelperNum(call->gtCallMethHnd); if (s_helperCallProperties.MutatesHeap(helpFunc)) { memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed); } else if (s_helperCallProperties.MayRunCctor(helpFunc)) { // If the call is labeled as "Hoistable", then we've checked the // class that would be constructed, and it is not precise-init, so // the cctor will not be run by this call. Otherwise, it might be, // and might have arbitrary side effects. if ((tree->gtFlags & GTF_CALL_HOISTABLE) == 0) { memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed); } } } else { memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed); } break; } default: // All other gtOper node kinds, leave 'memoryHavoc' unchanged (i.e. 
false) assert(!tree->OperRequiresAsgFlag()); break; } } } } if (memoryHavoc != emptyMemoryKindSet) { // Record that all loops containing this block have this kind of memoryHavoc effects. optRecordLoopNestsMemoryHavoc(mostNestedLoop, memoryHavoc); } return true; } // Marks the containsCall information to "lnum" and any parent loops. void Compiler::AddContainsCallAllContainingLoops(unsigned lnum) { #if FEATURE_LOOP_ALIGN // If this is the inner most loop, reset the LOOP_ALIGN flag // because a loop having call will not likely to benefit from // alignment if (optLoopTable[lnum].lpChild == BasicBlock::NOT_IN_LOOP) { BasicBlock* top = optLoopTable[lnum].lpTop; top->unmarkLoopAlign(this DEBUG_ARG("Loop with call")); } #endif assert(0 <= lnum && lnum < optLoopCount); while (lnum != BasicBlock::NOT_IN_LOOP) { optLoopTable[lnum].lpFlags |= LPFLG_CONTAINS_CALL; lnum = optLoopTable[lnum].lpParent; } } // Adds the variable liveness information for 'blk' to 'this' LoopDsc void Compiler::LoopDsc::AddVariableLiveness(Compiler* comp, BasicBlock* blk) { VarSetOps::UnionD(comp, this->lpVarInOut, blk->bbLiveIn); VarSetOps::UnionD(comp, this->lpVarInOut, blk->bbLiveOut); VarSetOps::UnionD(comp, this->lpVarUseDef, blk->bbVarUse); VarSetOps::UnionD(comp, this->lpVarUseDef, blk->bbVarDef); } // Adds the variable liveness information for 'blk' to "lnum" and any parent loops. void Compiler::AddVariableLivenessAllContainingLoops(unsigned lnum, BasicBlock* blk) { assert(0 <= lnum && lnum < optLoopCount); while (lnum != BasicBlock::NOT_IN_LOOP) { optLoopTable[lnum].AddVariableLiveness(this, blk); lnum = optLoopTable[lnum].lpParent; } } // Adds "fldHnd" to the set of modified fields of "lnum" and any parent loops. void Compiler::AddModifiedFieldAllContainingLoops(unsigned lnum, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind) { assert(0 <= lnum && lnum < optLoopCount); while (lnum != BasicBlock::NOT_IN_LOOP) { optLoopTable[lnum].AddModifiedField(this, fldHnd, fieldKind); lnum = optLoopTable[lnum].lpParent; } } // Adds "elemType" to the set of modified array element types of "lnum" and any parent loops. void Compiler::AddModifiedElemTypeAllContainingLoops(unsigned lnum, CORINFO_CLASS_HANDLE elemClsHnd) { assert(0 <= lnum && lnum < optLoopCount); while (lnum != BasicBlock::NOT_IN_LOOP) { optLoopTable[lnum].AddModifiedElemType(this, elemClsHnd); lnum = optLoopTable[lnum].lpParent; } } //------------------------------------------------------------------------------ // optRemoveRangeCheck : Given an indexing node, mark it as not needing a range check. // // Arguments: // check - Range check tree, the raw CHECK node (ARRAY, SIMD or HWINTRINSIC). // comma - GT_COMMA to which the "check" belongs, "nullptr" if the check is a standalone one. // stmt - Statement the indexing nodes belong to. // // Return Value: // Rewritten "check" - no-op if it has no side effects or the tree that contains them. // // Assumptions: // This method is capable of removing checks of two kinds: COMMA-based and standalone top-level ones. // In case of a COMMA-based check, "check" must be a non-null first operand of a non-null COMMA. // In case of a standalone check, "comma" must be null and "check" - "stmt"'s root. 
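//
// For example (an illustrative COMMA-based shape; details vary):
//      STMT root
//        ...
//          COMMA
//            BOUNDS_CHECK(index, length)   <-- "check"
//            IND(addr)
//    Here the caller passes the COMMA as "comma" and its first operand as "check"; the check is either
//    bashed to a NOP or replaced by a tree holding its extracted side effects, while the rest of the
//    COMMA is left in place.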
// GenTree* Compiler::optRemoveRangeCheck(GenTreeBoundsChk* check, GenTree* comma, Statement* stmt) { #if !REARRANGE_ADDS noway_assert(!"can't remove range checks without REARRANGE_ADDS right now"); #endif noway_assert(stmt != nullptr); noway_assert((comma != nullptr && comma->OperIs(GT_COMMA) && comma->gtGetOp1() == check) || (check != nullptr && check->OperIs(GT_BOUNDS_CHECK) && comma == nullptr)); noway_assert(check->OperIs(GT_BOUNDS_CHECK)); GenTree* tree = comma != nullptr ? comma : check; #ifdef DEBUG if (verbose) { printf("Before optRemoveRangeCheck:\n"); gtDispTree(tree); } #endif // Extract side effects GenTree* sideEffList = nullptr; gtExtractSideEffList(check, &sideEffList, GTF_ASG); if (sideEffList != nullptr) { // We've got some side effects. if (tree->OperIs(GT_COMMA)) { // Make the comma handle them. tree->AsOp()->gtOp1 = sideEffList; } else { // Make the statement execute them instead of the check. stmt->SetRootNode(sideEffList); tree = sideEffList; } } else { check->gtBashToNOP(); } if (tree->OperIs(GT_COMMA)) { // TODO-CQ: We should also remove the GT_COMMA, but in any case we can no longer CSE the GT_COMMA. tree->gtFlags |= GTF_DONT_CSE; } gtUpdateSideEffects(stmt, tree); // Recalculate the GetCostSz(), etc... gtSetStmtInfo(stmt); // Re-thread the nodes if necessary if (fgStmtListThreaded) { fgSetStmtSeq(stmt); } #ifdef DEBUG if (verbose) { // gtUpdateSideEffects can update the side effects for ancestors in the tree, so display the whole statement // tree, not just the sub-tree. printf("After optRemoveRangeCheck for [%06u]:\n", dspTreeID(tree)); gtDispTree(stmt->GetRootNode()); } #endif return check; } //------------------------------------------------------------------------------ // optRemoveStandaloneRangeCheck : A thin wrapper over optRemoveRangeCheck that removes standalone checks. // // Arguments: // check - The standalone top-level CHECK node. // stmt - The statement "check" is a root node of. // // Return Value: // If "check" has no side effects, it is retuned, bashed to a no-op. // If it has side effects, the tree that executes them is returned. // GenTree* Compiler::optRemoveStandaloneRangeCheck(GenTreeBoundsChk* check, Statement* stmt) { assert(check != nullptr); assert(stmt != nullptr); assert(check == stmt->GetRootNode()); return optRemoveRangeCheck(check, nullptr, stmt); } //------------------------------------------------------------------------------ // optRemoveCommaBasedRangeCheck : A thin wrapper over optRemoveRangeCheck that removes COMMA-based checks. // // Arguments: // comma - GT_COMMA of which the first operand is the CHECK to be removed. // stmt - The statement "comma" belongs to. // void Compiler::optRemoveCommaBasedRangeCheck(GenTree* comma, Statement* stmt) { assert(comma != nullptr && comma->OperIs(GT_COMMA)); assert(stmt != nullptr); assert(comma->gtGetOp1()->OperIs(GT_BOUNDS_CHECK)); optRemoveRangeCheck(comma->gtGetOp1()->AsBoundsChk(), comma, stmt); } /***************************************************************************** * Return the scale in an array reference, given a pointer to the * multiplication node. 
*/ ssize_t Compiler::optGetArrayRefScaleAndIndex(GenTree* mul, GenTree** pIndex DEBUGARG(bool bRngChk)) { assert(mul); assert(mul->gtOper == GT_MUL || mul->gtOper == GT_LSH); assert(mul->AsOp()->gtOp2->IsCnsIntOrI()); ssize_t scale = mul->AsOp()->gtOp2->AsIntConCommon()->IconValue(); if (mul->gtOper == GT_LSH) { scale = ((ssize_t)1) << scale; } GenTree* index = mul->AsOp()->gtOp1; if (index->gtOper == GT_MUL && index->AsOp()->gtOp2->IsCnsIntOrI()) { // case of two cascading multiplications for constant int (e.g. * 20 morphed to * 5 * 4): // When index->gtOper is GT_MUL and index->AsOp()->gtOp2->gtOper is GT_CNS_INT (i.e. * 5), // we can bump up the scale from 4 to 5*4, and then change index to index->AsOp()->gtOp1. // Otherwise, we cannot optimize it. We will simply keep the original scale and index. scale *= index->AsOp()->gtOp2->AsIntConCommon()->IconValue(); index = index->AsOp()->gtOp1; } assert(!bRngChk || index->gtOper != GT_COMMA); if (pIndex) { *pIndex = index; } return scale; } //----------------------------------------------------------------------------- // OptTestInfo: Member of OptBoolsDsc struct used to test if a GT_JTRUE or GT_RETURN node // is a boolean comparison // struct OptTestInfo { GenTree* testTree; // The root node of basic block with GT_JTRUE or GT_RETURN type to check boolean condition on GenTree* compTree; // The compare node (i.e. GT_EQ or GT_NE node) of the testTree bool isBool; // If the compTree is boolean expression }; //----------------------------------------------------------------------------- // OptBoolsDsc: Descriptor used for Boolean Optimization // class OptBoolsDsc { public: OptBoolsDsc(BasicBlock* b1, BasicBlock* b2, Compiler* comp) { m_b1 = b1; m_b2 = b2; m_b3 = nullptr; m_comp = comp; } private: BasicBlock* m_b1; // The first basic block with the BBJ_COND conditional jump type BasicBlock* m_b2; // The next basic block of m_b1. Either BBJ_COND or BBJ_RETURN type BasicBlock* m_b3; // m_b1->bbJumpDest. Null if m_b2 is not a return block. Compiler* m_comp; // The pointer to the Compiler instance OptTestInfo m_testInfo1; // The first test info OptTestInfo m_testInfo2; // The second test info GenTree* m_t3; // The root node of the first statement of m_b3 GenTree* m_c1; // The first operand of m_testInfo1.compTree GenTree* m_c2; // The first operand of m_testInfo2.compTree bool m_sameTarget; // if m_b1 and m_b2 jumps to the same destination genTreeOps m_foldOp; // The fold operator (e.g., GT_AND or GT_OR) var_types m_foldType; // The type of the folded tree genTreeOps m_cmpOp; // The comparison operator (e.g., GT_EQ or GT_NE) public: bool optOptimizeBoolsCondBlock(); bool optOptimizeBoolsReturnBlock(BasicBlock* b3); #ifdef DEBUG void optOptimizeBoolsGcStress(); #endif private: Statement* optOptimizeBoolsChkBlkCond(); GenTree* optIsBoolComp(OptTestInfo* pOptTest); bool optOptimizeBoolsChkTypeCostCond(); void optOptimizeBoolsUpdateTrees(); }; //----------------------------------------------------------------------------- // optOptimizeBoolsCondBlock: Optimize boolean when bbJumpKind of both m_b1 and m_b2 are BBJ_COND // // Returns: // true if boolean optimization is done and m_b1 and m_b2 are folded into m_b1, else false. // // Notes: // m_b1 and m_b2 are set on entry. 
// // Case 1: if b1.bbJumpDest == b2.bbJumpDest, it transforms // B1 : brtrue(t1, Bx) // B2 : brtrue(t2, Bx) // B3 : // to // B1 : brtrue(t1|t2, BX) // B3 : // // For example, (x == 0 && y == 0 && z == 0) generates // B1: GT_JTRUE (BBJ_COND), jump to B4 // B2: GT_JTRUE (BBJ_COND), jump to B4 // B3: GT_RETURN (BBJ_RETURN) // B4: GT_RETURN (BBJ_RETURN) // and B1 and B2 are folded into B1: // B1: GT_JTRUE (BBJ_COND), jump to B4 // B3: GT_RETURN (BBJ_RETURN) // B4: GT_RETURN (BBJ_RETURN) // // Case 2: if B1.bbJumpDest == B2->bbNext, it transforms // B1 : brtrue(t1, B3) // B2 : brtrue(t2, Bx) // B3 : // to // B1 : brtrue((!t1) && t2, Bx) // B3 : // bool OptBoolsDsc::optOptimizeBoolsCondBlock() { assert(m_b1 != nullptr && m_b2 != nullptr && m_b3 == nullptr); // Check if m_b1 and m_b2 jump to the same target and get back pointers to m_testInfo1 and t2 tree nodes m_t3 = nullptr; // Check if m_b1 and m_b2 have the same bbJumpDest if (m_b1->bbJumpDest == m_b2->bbJumpDest) { // Given the following sequence of blocks : // B1: brtrue(t1, BX) // B2: brtrue(t2, BX) // B3: // we will try to fold it to : // B1: brtrue(t1|t2, BX) // B3: m_sameTarget = true; } else if (m_b1->bbJumpDest == m_b2->bbNext) { // Given the following sequence of blocks : // B1: brtrue(t1, B3) // B2: brtrue(t2, BX) // B3: // we will try to fold it to : // B1: brtrue((!t1)&&t2, BX) // B3: m_sameTarget = false; } else { return false; } Statement* const s1 = optOptimizeBoolsChkBlkCond(); if (s1 == nullptr) { return false; } // Find the branch conditions of m_b1 and m_b2 m_c1 = optIsBoolComp(&m_testInfo1); if (m_c1 == nullptr) { return false; } m_c2 = optIsBoolComp(&m_testInfo2); if (m_c2 == nullptr) { return false; } // Find the type and cost conditions of m_testInfo1 and m_testInfo2 if (!optOptimizeBoolsChkTypeCostCond()) { return false; } // Get the fold operator and the comparison operator genTreeOps foldOp; genTreeOps cmpOp; var_types foldType = m_c1->TypeGet(); if (varTypeIsGC(foldType)) { foldType = TYP_I_IMPL; } assert(m_testInfo1.compTree->gtOper == GT_EQ || m_testInfo1.compTree->gtOper == GT_NE); if (m_sameTarget) { // Both conditions must be the same if (m_testInfo1.compTree->gtOper != m_testInfo2.compTree->gtOper) { return false; } if (m_testInfo1.compTree->gtOper == GT_EQ) { // t1:c1==0 t2:c2==0 ==> Branch to BX if either value is 0 // So we will branch to BX if (c1&c2)==0 foldOp = GT_AND; cmpOp = GT_EQ; } else { // t1:c1!=0 t2:c2!=0 ==> Branch to BX if either value is non-0 // So we will branch to BX if (c1|c2)!=0 foldOp = GT_OR; cmpOp = GT_NE; } } else { // The m_b1 condition must be the reverse of the m_b2 condition because the only operators // that we will see here are GT_EQ and GT_NE. So, if they are not the same, we have one of each. if (m_testInfo1.compTree->gtOper == m_testInfo2.compTree->gtOper) { return false; } if (m_testInfo1.compTree->gtOper == GT_EQ) { // t1:c1==0 t2:c2!=0 ==> Branch to BX if both values are non-0 // So we will branch to BX if (c1&c2)!=0 foldOp = GT_AND; cmpOp = GT_NE; } else { // t1:c1!=0 t2:c2==0 ==> Branch to BX if both values are 0 // So we will branch to BX if (c1|c2)==0 foldOp = GT_OR; cmpOp = GT_EQ; } } // Anding requires both values to be 0 or 1 if ((foldOp == GT_AND) && (!m_testInfo1.isBool || !m_testInfo2.isBool)) { return false; } // // Now update the trees // m_foldOp = foldOp; m_foldType = foldType; m_cmpOp = cmpOp; optOptimizeBoolsUpdateTrees(); #ifdef DEBUG if (m_comp->verbose) { printf("Folded %sboolean conditions of " FMT_BB " and " FMT_BB " to :\n", m_c2->OperIsLeaf() ? 
"" : "non-leaf ", m_b1->bbNum, m_b2->bbNum); m_comp->gtDispStmt(s1); printf("\n"); } #endif // Return true to continue the bool optimization for the rest of the BB chain return true; } //----------------------------------------------------------------------------- // optOptimizeBoolsChkBlkCond: Checks block conditions if it can be boolean optimized // // Return: // If all conditions pass, returns the last statement of m_b1, else return nullptr. // // Notes: // This method checks if the second (and third block for cond/return/return case) contains only one statement, // and checks if tree operators are of the right type, e.g, GT_JTRUE, GT_RETURN. // // On entry, m_b1, m_b2 are set and m_b3 is set for cond/return/return case. // If it passes all the conditions, m_testInfo1.testTree, m_testInfo2.testTree and m_t3 are set // to the root nodes of m_b1, m_b2 and m_b3 each. // SameTarget is also updated to true if m_b1 and m_b2 jump to the same destination. // Statement* OptBoolsDsc::optOptimizeBoolsChkBlkCond() { assert(m_b1 != nullptr && m_b2 != nullptr); bool optReturnBlock = false; if (m_b3 != nullptr) { optReturnBlock = true; } // Find the block conditions of m_b1 and m_b2 if (m_b2->countOfInEdges() > 1 || (optReturnBlock && m_b3->countOfInEdges() > 1)) { return nullptr; } // Find the condition for the first block Statement* s1 = m_b1->lastStmt(); GenTree* testTree1 = s1->GetRootNode(); assert(testTree1->gtOper == GT_JTRUE); // The second and the third block must contain a single statement Statement* s2 = m_b2->firstStmt(); if (s2->GetPrevStmt() != s2) { return nullptr; } GenTree* testTree2 = s2->GetRootNode(); if (!optReturnBlock) { assert(testTree2->gtOper == GT_JTRUE); } else { if (testTree2->gtOper != GT_RETURN) { return nullptr; } Statement* s3 = m_b3->firstStmt(); if (s3->GetPrevStmt() != s3) { return nullptr; } GenTree* testTree3 = s3->GetRootNode(); if (testTree3->gtOper != GT_RETURN) { return nullptr; } if (!varTypeIsIntegral(testTree2->TypeGet()) || !varTypeIsIntegral(testTree3->TypeGet())) { return nullptr; } // The third block is Return with "CNS_INT int 0/1" if (testTree3->AsOp()->gtOp1->gtOper != GT_CNS_INT) { return nullptr; } if (testTree3->AsOp()->gtOp1->gtType != TYP_INT) { return nullptr; } m_t3 = testTree3; } m_testInfo1.testTree = testTree1; m_testInfo2.testTree = testTree2; return s1; } //----------------------------------------------------------------------------- // optOptimizeBoolsChkTypeCostCond: Checks if type conditions meet the folding condition, and // if cost to fold is not too expensive // // Return: // True if it meets type conditions and cost conditions. Else false. // bool OptBoolsDsc::optOptimizeBoolsChkTypeCostCond() { assert(m_testInfo1.compTree->OperIs(GT_EQ, GT_NE) && m_testInfo1.compTree->AsOp()->gtOp1 == m_c1); assert(m_testInfo2.compTree->OperIs(GT_EQ, GT_NE) && m_testInfo2.compTree->AsOp()->gtOp1 == m_c2); // // Leave out floats where the bit-representation is more complicated // - there are two representations for 0. // if (varTypeIsFloating(m_c1->TypeGet()) || varTypeIsFloating(m_c2->TypeGet())) { return false; } // Make sure the types involved are of the same sizes if (genTypeSize(m_c1->TypeGet()) != genTypeSize(m_c2->TypeGet())) { return false; } if (genTypeSize(m_testInfo1.compTree->TypeGet()) != genTypeSize(m_testInfo2.compTree->TypeGet())) { return false; } #ifdef TARGET_ARMARCH // Skip the small operand which we cannot encode. 
if (varTypeIsSmall(m_c1->TypeGet())) return false; #endif // The second condition must not contain side effects if (m_c2->gtFlags & GTF_GLOB_EFFECT) { return false; } // The second condition must not be too expensive m_comp->gtPrepareCost(m_c2); if (m_c2->GetCostEx() > 12) { return false; } return true; } //----------------------------------------------------------------------------- // optOptimizeBoolsUpdateTrees: Fold the trees based on fold type and comparison type, // update the edges, unlink removed blocks and update loop table // void OptBoolsDsc::optOptimizeBoolsUpdateTrees() { assert(m_b1 != nullptr && m_b2 != nullptr); bool optReturnBlock = false; if (m_b3 != nullptr) { optReturnBlock = true; } assert(m_foldOp != NULL && m_foldType != NULL && m_c1 != nullptr && m_c2 != nullptr); GenTree* cmpOp1 = m_comp->gtNewOperNode(m_foldOp, m_foldType, m_c1, m_c2); if (m_testInfo1.isBool && m_testInfo2.isBool) { // When we 'OR'/'AND' two booleans, the result is boolean as well cmpOp1->gtFlags |= GTF_BOOLEAN; } GenTree* t1Comp = m_testInfo1.compTree; t1Comp->SetOper(m_cmpOp); t1Comp->AsOp()->gtOp1 = cmpOp1; t1Comp->AsOp()->gtOp2->gtType = m_foldType; // Could have been varTypeIsGC() if (optReturnBlock) { // Update tree when m_b1 is BBJ_COND and m_b2 and m_b3 are GT_RETURN (BBJ_RETURN) t1Comp->AsOp()->gtOp2->AsIntCon()->gtIconVal = 0; m_testInfo1.testTree->gtOper = GT_RETURN; m_testInfo1.testTree->gtType = m_testInfo2.testTree->gtType; // Update the return count of flow graph assert(m_comp->fgReturnCount >= 2); --m_comp->fgReturnCount; } #if FEATURE_SET_FLAGS // For comparisons against zero we will have the GTF_SET_FLAGS set // and this can cause an assert to fire in fgMoveOpsLeft(GenTree* tree) // during the CSE phase. // // So make sure to clear any GTF_SET_FLAGS bit on these operations // as they are no longer feeding directly into a comparisons against zero // Make sure that the GTF_SET_FLAGS bit is cleared. // Fix 388436 ARM JitStress WP7 m_c1->gtFlags &= ~GTF_SET_FLAGS; m_c2->gtFlags &= ~GTF_SET_FLAGS; // The new top level node that we just created does feed directly into // a comparison against zero, so set the GTF_SET_FLAGS bit so that // we generate an instruction that sets the flags, which allows us // to omit the cmp with zero instruction. // Request that the codegen for cmpOp1 sets the condition flags // when it generates the code for cmpOp1. 
// cmpOp1->gtRequestSetFlags(); #endif if (!optReturnBlock) { // Update edges if m_b1: BBJ_COND and m_b2: BBJ_COND flowList* edge1 = m_comp->fgGetPredForBlock(m_b1->bbJumpDest, m_b1); flowList* edge2; if (m_sameTarget) { edge2 = m_comp->fgGetPredForBlock(m_b2->bbJumpDest, m_b2); } else { edge2 = m_comp->fgGetPredForBlock(m_b2->bbNext, m_b2); m_comp->fgRemoveRefPred(m_b1->bbJumpDest, m_b1); m_b1->bbJumpDest = m_b2->bbJumpDest; m_comp->fgAddRefPred(m_b2->bbJumpDest, m_b1); } assert(edge1 != nullptr); assert(edge2 != nullptr); weight_t edgeSumMin = edge1->edgeWeightMin() + edge2->edgeWeightMin(); weight_t edgeSumMax = edge1->edgeWeightMax() + edge2->edgeWeightMax(); if ((edgeSumMax >= edge1->edgeWeightMax()) && (edgeSumMax >= edge2->edgeWeightMax())) { edge1->setEdgeWeights(edgeSumMin, edgeSumMax, m_b1->bbJumpDest); } else { edge1->setEdgeWeights(BB_ZERO_WEIGHT, BB_MAX_WEIGHT, m_b1->bbJumpDest); } } /* Modify the target of the conditional jump and update bbRefs and bbPreds */ if (optReturnBlock) { m_b1->bbJumpDest = nullptr; m_b1->bbJumpKind = BBJ_RETURN; #ifdef DEBUG m_b1->bbJumpSwt = m_b2->bbJumpSwt; #endif assert(m_b2->bbJumpKind == BBJ_RETURN); assert(m_b1->bbNext == m_b2); assert(m_b3 != nullptr); } else { assert(m_b1->bbJumpKind == BBJ_COND); assert(m_b2->bbJumpKind == BBJ_COND); assert(m_b1->bbJumpDest == m_b2->bbJumpDest); assert(m_b1->bbNext == m_b2); assert(m_b2->bbNext != nullptr); } if (!optReturnBlock) { // Update bbRefs and bbPreds // // Replace pred 'm_b2' for 'm_b2->bbNext' with 'm_b1' // Remove pred 'm_b2' for 'm_b2->bbJumpDest' m_comp->fgReplacePred(m_b2->bbNext, m_b2, m_b1); m_comp->fgRemoveRefPred(m_b2->bbJumpDest, m_b2); } // Get rid of the second block m_comp->fgUnlinkBlock(m_b2); m_b2->bbFlags |= BBF_REMOVED; // If m_b2 was the last block of a try or handler, update the EH table. m_comp->ehUpdateForDeletedBlock(m_b2); if (optReturnBlock) { // Get rid of the third block m_comp->fgUnlinkBlock(m_b3); m_b3->bbFlags |= BBF_REMOVED; // If m_b3 was the last block of a try or handler, update the EH table. m_comp->ehUpdateForDeletedBlock(m_b3); } // Update loop table m_comp->fgUpdateLoopsAfterCompacting(m_b1, m_b2); if (optReturnBlock) { m_comp->fgUpdateLoopsAfterCompacting(m_b1, m_b3); } } //----------------------------------------------------------------------------- // optOptimizeBoolsReturnBlock: Optimize boolean when m_b1 is BBJ_COND and m_b2 and m_b3 are BBJ_RETURN // // Arguments: // b3: Pointer to basic block b3 // // Returns: // true if boolean optimization is done and m_b1, m_b2 and m_b3 are folded into m_b1, else false. // // Notes: // m_b1, m_b2 and m_b3 of OptBoolsDsc are set on entry. 
// // if B1.bbJumpDest == b3, it transforms // B1 : brtrue(t1, B3) // B2 : ret(t2) // B3 : ret(0) // to // B1 : ret((!t1) && t2) // // For example, (x==0 && y==0) generates: // B1: GT_JTRUE (BBJ_COND), jumps to B3 // B2: GT_RETURN (BBJ_RETURN) // B3: GT_RETURN (BBJ_RETURN), // and it is folded into // B1: GT_RETURN (BBJ_RETURN) // bool OptBoolsDsc::optOptimizeBoolsReturnBlock(BasicBlock* b3) { assert(m_b1 != nullptr && m_b2 != nullptr); // m_b3 is set for cond/return/return case m_b3 = b3; m_sameTarget = false; Statement* const s1 = optOptimizeBoolsChkBlkCond(); if (s1 == nullptr) { return false; } // Find the branch conditions of m_b1 and m_b2 m_c1 = optIsBoolComp(&m_testInfo1); if (m_c1 == nullptr) { return false; } m_c2 = optIsBoolComp(&m_testInfo2); if (m_c2 == nullptr) { return false; } // Find the type and cost conditions of m_testInfo1 and m_testInfo2 if (!optOptimizeBoolsChkTypeCostCond()) { return false; } // Get the fold operator (m_foldOp, e.g., GT_OR/GT_AND) and // the comparison operator (m_cmpOp, e.g., GT_EQ/GT_NE) var_types foldType = m_c1->TypeGet(); if (varTypeIsGC(foldType)) { foldType = TYP_I_IMPL; } m_foldType = foldType; m_foldOp = GT_NONE; m_cmpOp = GT_NONE; genTreeOps foldOp; genTreeOps cmpOp; ssize_t it1val = m_testInfo1.compTree->AsOp()->gtOp2->AsIntCon()->gtIconVal; ssize_t it2val = m_testInfo2.compTree->AsOp()->gtOp2->AsIntCon()->gtIconVal; ssize_t it3val = m_t3->AsOp()->gtOp1->AsIntCon()->gtIconVal; if ((m_testInfo1.compTree->gtOper == GT_NE && m_testInfo2.compTree->gtOper == GT_EQ) && (it1val == 0 && it2val == 0 && it3val == 0)) { // Case: x == 0 && y == 0 // t1:c1!=0 t2:c2==0 t3:c3==0 // ==> true if (c1|c2)==0 foldOp = GT_OR; cmpOp = GT_EQ; } else if ((m_testInfo1.compTree->gtOper == GT_EQ && m_testInfo2.compTree->gtOper == GT_NE) && (it1val == 0 && it2val == 0 && it3val == 0)) { // Case: x == 1 && y ==1 // t1:c1!=1 t2:c2==1 t3:c3==0 is reversed from optIsBoolComp() to: t1:c1==0 t2:c2!=0 t3:c3==0 // ==> true if (c1&c2)!=0 foldOp = GT_AND; cmpOp = GT_NE; } else if ((m_testInfo1.compTree->gtOper == GT_EQ && m_testInfo2.compTree->gtOper == GT_EQ) && (it1val == 0 && it2val == 0 && it3val == 1)) { // Case: x == 0 || y == 0 // t1:c1==0 t2:c2==0 t3:c3==1 // ==> true if (c1&c2)==0 foldOp = GT_AND; cmpOp = GT_EQ; } else if ((m_testInfo1.compTree->gtOper == GT_NE && m_testInfo2.compTree->gtOper == GT_NE) && (it1val == 0 && it2val == 0 && it3val == 1)) { // Case: x == 1 || y == 1 // t1:c1==1 t2:c2==1 t3:c3==1 is reversed from optIsBoolComp() to: t1:c1!=0 t2:c2!=0 t3:c3==1 // ==> true if (c1|c2)!=0 foldOp = GT_OR; cmpOp = GT_NE; } else { // Require NOT operation for operand(s). Do Not fold. return false; } if ((foldOp == GT_AND || cmpOp == GT_NE) && (!m_testInfo1.isBool || !m_testInfo2.isBool)) { // x == 1 && y == 1: Skip cases where x or y is greather than 1, e.g., x=3, y=1 // x == 0 || y == 0: Skip cases where x and y have opposite bits set, e.g., x=2, y=1 // x == 1 || y == 1: Skip cases where either x or y is greater than 1, e.g., x=2, y=0 return false; } m_foldOp = foldOp; m_cmpOp = cmpOp; // Now update the trees optOptimizeBoolsUpdateTrees(); #ifdef DEBUG if (m_comp->verbose) { printf("Folded %sboolean conditions of " FMT_BB ", " FMT_BB " and " FMT_BB " to :\n", m_c2->OperIsLeaf() ? 
"" : "non-leaf ", m_b1->bbNum, m_b2->bbNum, m_b3->bbNum); m_comp->gtDispStmt(s1); printf("\n"); } #endif // Return true to continue the bool optimization for the rest of the BB chain return true; } //----------------------------------------------------------------------------- // optOptimizeBoolsGcStress: Replace x==null with (x|x)==0 if x is a GC-type. // This will stress code-gen and the emitter to make sure they support such trees. // #ifdef DEBUG void OptBoolsDsc::optOptimizeBoolsGcStress() { if (!m_comp->compStressCompile(m_comp->STRESS_OPT_BOOLS_GC, 20)) { return; } assert(m_b1->bbJumpKind == BBJ_COND); GenTree* cond = m_b1->lastStmt()->GetRootNode(); assert(cond->gtOper == GT_JTRUE); OptTestInfo test; test.testTree = cond; GenTree* comparand = optIsBoolComp(&test); if (comparand == nullptr || !varTypeIsGC(comparand->TypeGet())) { return; } GenTree* relop = test.compTree; bool isBool = test.isBool; if (comparand->gtFlags & (GTF_ASG | GTF_CALL | GTF_ORDER_SIDEEFF)) { return; } GenTree* comparandClone = m_comp->gtCloneExpr(comparand); noway_assert(relop->AsOp()->gtOp1 == comparand); genTreeOps oper = m_comp->compStressCompile(m_comp->STRESS_OPT_BOOLS_GC, 50) ? GT_OR : GT_AND; relop->AsOp()->gtOp1 = m_comp->gtNewOperNode(oper, TYP_I_IMPL, comparand, comparandClone); // Comparand type is already checked, and we have const int, there is no harm // morphing it into a TYP_I_IMPL. noway_assert(relop->AsOp()->gtOp2->gtOper == GT_CNS_INT); relop->AsOp()->gtOp2->gtType = TYP_I_IMPL; } #endif //----------------------------------------------------------------------------- // optIsBoolComp: Function used by folding of boolean conditionals // // Arguments: // pOptTest The test info for the test tree // // Return: // On success, return the first operand (gtOp1) of compTree, else return nullptr. // // Notes: // On entry, testTree is set. // On success, compTree is set to the compare node (i.e. GT_EQ or GT_NE) of the testTree. // isBool is set to true if the comparand (i.e., operand 1 of compTree is boolean. Otherwise, false. // // Given a GT_JTRUE or GT_RETURN node, this method checks if it is a boolean comparison // of the form "if (boolVal ==/!= 0/1)".This is translated into // a GT_EQ/GT_NE node with "opr1" being a boolean lclVar and "opr2" the const 0/1. // // When isBool == true, if the comparison was against a 1 (i.e true) // then we morph the tree by reversing the GT_EQ/GT_NE and change the 1 to 0. // GenTree* OptBoolsDsc::optIsBoolComp(OptTestInfo* pOptTest) { pOptTest->isBool = false; assert(pOptTest->testTree->gtOper == GT_JTRUE || pOptTest->testTree->gtOper == GT_RETURN); GenTree* cond = pOptTest->testTree->AsOp()->gtOp1; // The condition must be "!= 0" or "== 0" if ((cond->gtOper != GT_EQ) && (cond->gtOper != GT_NE)) { return nullptr; } // Return the compare node to the caller pOptTest->compTree = cond; // Get hold of the comparands GenTree* opr1 = cond->AsOp()->gtOp1; GenTree* opr2 = cond->AsOp()->gtOp2; if (opr2->gtOper != GT_CNS_INT) { return nullptr; } if (!opr2->IsIntegralConst(0) && !opr2->IsIntegralConst(1)) { return nullptr; } ssize_t ival2 = opr2->AsIntCon()->gtIconVal; // Is the value a boolean? 
// We can either have a boolean expression (marked GTF_BOOLEAN) or // a local variable that is marked as being boolean (lvIsBoolean) if (opr1->gtFlags & GTF_BOOLEAN) { pOptTest->isBool = true; } else if ((opr1->gtOper == GT_CNS_INT) && (opr1->IsIntegralConst(0) || opr1->IsIntegralConst(1))) { pOptTest->isBool = true; } else if (opr1->gtOper == GT_LCL_VAR) { // is it a boolean local variable? unsigned lclNum = opr1->AsLclVarCommon()->GetLclNum(); noway_assert(lclNum < m_comp->lvaCount); if (m_comp->lvaTable[lclNum].lvIsBoolean) { pOptTest->isBool = true; } } // Was our comparison against the constant 1 (i.e. true) if (ival2 == 1) { // If this is a boolean expression tree we can reverse the relop // and change the true to false. if (pOptTest->isBool) { m_comp->gtReverseCond(cond); opr2->AsIntCon()->gtIconVal = 0; } else { return nullptr; } } return opr1; } //----------------------------------------------------------------------------- // optOptimizeBools: Folds boolean conditionals for GT_JTRUE/GT_RETURN nodes // // Notes: // If the operand of GT_JTRUE/GT_RETURN node is GT_EQ/GT_NE of the form // "if (boolVal ==/!= 0/1)", the GT_EQ/GT_NE nodes are translated into a // GT_EQ/GT_NE node with // "op1" being a boolean GT_OR/GT_AND lclVar and // "op2" the const 0/1. // For example, the folded tree for the below boolean optimization is shown below: // Case 1: (x == 0 && y ==0) => (x | y) == 0 // * RETURN int // \--* EQ int // +--* OR int // | +--* LCL_VAR int V00 arg0 // | \--* LCL_VAR int V01 arg1 // \--* CNS_INT int 0 // // Case 2: (x == null && y == null) ==> (x | y) == 0 // * RETURN int // \-- * EQ int // + -- * OR long // | +-- * LCL_VAR ref V00 arg0 // | \-- * LCL_VAR ref V01 arg1 // \-- * CNS_INT long 0 // // Case 3: (x == 0 && y == 0 && z == 0) ==> ((x | y) | z) == 0 // * RETURN int // \-- * EQ int // + -- * OR int // | +-- * OR int // | | +-- * LCL_VAR int V00 arg0 // | | \-- * LCL_VAR int V01 arg1 // | \-- * LCL_VAR int V02 arg2 // \-- * CNS_INT int 0 // // Case 4: (x == 0 && y == 0 && z == 0 && w == 0) ==> (((x | y) | z) | w) == 0 // * RETURN int // \-- * EQ int // + * OR int // | +--* OR int // | | +--* OR int // | | | +--* LCL_VAR int V00 arg0 // | | | \--* LCL_VAR int V01 arg1 // | | \--* LCL_VAR int V02 arg2 // | \--* LCL_VAR int V03 arg3 // \--* CNS_INT int 0 // // Patterns that are not optimized include (x == 1 && y == 1), (x == 1 || y == 1), // (x == 0 || y == 0) because currently their comptree is not marked as boolean expression. 
// When m_foldOp == GT_AND or m_cmpOp == GT_NE, both compTrees must be boolean expression // in order to skip below cases when compTree is not boolean expression: // - x == 1 && y == 1 ==> (x&y)!=0: Skip cases where x or y is greather than 1, e.g., x=3, y=1 // - x == 1 || y == 1 ==> (x|y)!=0: Skip cases where either x or y is greater than 1, e.g., x=2, y=0 // - x == 0 || y == 0 ==> (x&y)==0: Skip cases where x and y have opposite bits set, e.g., x=2, y=1 // void Compiler::optOptimizeBools() { #ifdef DEBUG if (verbose) { printf("*************** In optOptimizeBools()\n"); if (verboseTrees) { printf("Blocks/Trees before phase\n"); fgDispBasicBlocks(true); } } #endif bool change; do { change = false; for (BasicBlock* const b1 : Blocks()) { // We're only interested in conditional jumps here if (b1->bbJumpKind != BBJ_COND) { continue; } // If there is no next block, we're done BasicBlock* b2 = b1->bbNext; if (b2 == nullptr) { break; } // The next block must not be marked as BBF_DONT_REMOVE if (b2->bbFlags & BBF_DONT_REMOVE) { continue; } OptBoolsDsc optBoolsDsc(b1, b2, this); // The next block needs to be a condition or return block. if (b2->bbJumpKind == BBJ_COND) { if ((b1->bbJumpDest != b2->bbJumpDest) && (b1->bbJumpDest != b2->bbNext)) { continue; } // When it is conditional jumps if (optBoolsDsc.optOptimizeBoolsCondBlock()) { change = true; } } else if (b2->bbJumpKind == BBJ_RETURN) { // Set b3 to b1 jump destination BasicBlock* b3 = b1->bbJumpDest; // b3 must not be marked as BBF_DONT_REMOVE if (b3->bbFlags & BBF_DONT_REMOVE) { continue; } // b3 must be RETURN type if (b3->bbJumpKind != BBJ_RETURN) { continue; } if (optBoolsDsc.optOptimizeBoolsReturnBlock(b3)) { change = true; } } else { #ifdef DEBUG optBoolsDsc.optOptimizeBoolsGcStress(); #endif } } } while (change); #ifdef DEBUG fgDebugCheckBBlist(); #endif } typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, unsigned> LclVarRefCounts; //------------------------------------------------------------------------------------------ // optRemoveRedundantZeroInits: Remove redundant zero intializations. // // Notes: // This phase iterates over basic blocks starting with the first basic block until there is no unique // basic block successor or until it detects a loop. It keeps track of local nodes it encounters. // When it gets to an assignment to a local variable or a local field, it checks whether the assignment // is the first reference to the local (or to the parent of the local field), and, if so, // it may do one of two optimizations: // 1. If the following conditions are true: // the local is untracked, // the rhs of the assignment is 0, // the local is guaranteed to be fully initialized in the prolog, // then the explicit zero initialization is removed. // 2. If the following conditions are true: // the assignment is to a local (and not a field), // the local is not lvLiveInOutOfHndlr or no exceptions can be thrown between the prolog and the assignment, // either the local has no gc pointers or there are no gc-safe points between the prolog and the assignment, // then the local is marked with lvHasExplicitInit which tells the codegen not to insert zero initialization // for this local in the prolog. 
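//
// For example (an illustrative sketch), given IL equivalent to:
//
//      int x = 0;   // first reference to 'x' after the prolog; no loop back-edges or exception
//                   // flow between the prolog and this statement
//
// optimization 1 deletes the explicit "x = 0" when the prolog is already guaranteed to zero 'x',
// while optimization 2 instead marks locals whose first definition fully initializes them
// (lvHasExplicitInit) so that the prolog zero initialization can be skipped for them.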
void Compiler::optRemoveRedundantZeroInits() { #ifdef DEBUG if (verbose) { printf("*************** In optRemoveRedundantZeroInits()\n"); } #endif // DEBUG CompAllocator allocator(getAllocator(CMK_ZeroInit)); LclVarRefCounts refCounts(allocator); BitVecTraits bitVecTraits(lvaCount, this); BitVec zeroInitLocals = BitVecOps::MakeEmpty(&bitVecTraits); bool hasGCSafePoint = false; bool canThrow = false; assert(fgStmtListThreaded); for (BasicBlock* block = fgFirstBB; (block != nullptr) && ((block->bbFlags & BBF_MARKED) == 0); block = block->GetUniqueSucc()) { block->bbFlags |= BBF_MARKED; CompAllocator allocator(getAllocator(CMK_ZeroInit)); LclVarRefCounts defsInBlock(allocator); bool removedTrackedDefs = false; for (Statement* stmt = block->FirstNonPhiDef(); stmt != nullptr;) { Statement* next = stmt->GetNextStmt(); for (GenTree* const tree : stmt->TreeList()) { if (((tree->gtFlags & GTF_CALL) != 0)) { hasGCSafePoint = true; } if ((tree->gtFlags & GTF_EXCEPT) != 0) { canThrow = true; } switch (tree->gtOper) { case GT_LCL_VAR: case GT_LCL_FLD: case GT_LCL_VAR_ADDR: case GT_LCL_FLD_ADDR: { unsigned lclNum = tree->AsLclVarCommon()->GetLclNum(); unsigned* pRefCount = refCounts.LookupPointer(lclNum); if (pRefCount != nullptr) { *pRefCount = (*pRefCount) + 1; } else { refCounts.Set(lclNum, 1); } if ((tree->gtFlags & GTF_VAR_DEF) == 0) { break; } // We need to count the number of tracked var defs in the block // so that we can update block->bbVarDef if we remove any tracked var defs. LclVarDsc* const lclDsc = lvaGetDesc(lclNum); if (lclDsc->lvTracked) { unsigned* pDefsCount = defsInBlock.LookupPointer(lclNum); if (pDefsCount != nullptr) { *pDefsCount = (*pDefsCount) + 1; } else { defsInBlock.Set(lclNum, 1); } } else if (varTypeIsStruct(lclDsc) && ((tree->gtFlags & GTF_VAR_USEASG) == 0) && lvaGetPromotionType(lclDsc) != PROMOTION_TYPE_NONE) { for (unsigned i = lclDsc->lvFieldLclStart; i < lclDsc->lvFieldLclStart + lclDsc->lvFieldCnt; ++i) { if (lvaGetDesc(i)->lvTracked) { unsigned* pDefsCount = defsInBlock.LookupPointer(i); if (pDefsCount != nullptr) { *pDefsCount = (*pDefsCount) + 1; } else { defsInBlock.Set(i, 1); } } } } break; } // case GT_CALL: // TODO-CQ: Need to remove redundant zero-inits for "return buffer". // assert(!"Need to handle zero inits.\n"); // break; case GT_ASG: { GenTreeOp* treeOp = tree->AsOp(); GenTreeLclVarCommon* lclVar; bool isEntire; if (!tree->DefinesLocal(this, &lclVar, &isEntire)) { break; } const unsigned lclNum = lclVar->GetLclNum(); LclVarDsc* const lclDsc = lvaGetDesc(lclNum); unsigned* pRefCount = refCounts.LookupPointer(lclNum); // pRefCount can't be null because the local node on the lhs of the assignment // must have already been seen. assert(pRefCount != nullptr); if (*pRefCount != 1) { break; } unsigned parentRefCount = 0; if (lclDsc->lvIsStructField && refCounts.Lookup(lclDsc->lvParentLcl, &parentRefCount) && (parentRefCount != 0)) { break; } unsigned fieldRefCount = 0; if (lclDsc->lvPromoted) { for (unsigned i = lclDsc->lvFieldLclStart; (fieldRefCount == 0) && (i < lclDsc->lvFieldLclStart + lclDsc->lvFieldCnt); ++i) { refCounts.Lookup(i, &fieldRefCount); } } if (fieldRefCount != 0) { break; } // The local hasn't been referenced before this assignment. 
bool removedExplicitZeroInit = false; if (treeOp->gtGetOp2()->IsIntegralConst(0)) { bool bbInALoop = (block->bbFlags & BBF_BACKWARD_JUMP) != 0; bool bbIsReturn = block->bbJumpKind == BBJ_RETURN; if (!bbInALoop || bbIsReturn) { if (BitVecOps::IsMember(&bitVecTraits, zeroInitLocals, lclNum) || (lclDsc->lvIsStructField && BitVecOps::IsMember(&bitVecTraits, zeroInitLocals, lclDsc->lvParentLcl)) || ((!lclDsc->lvTracked || !isEntire) && !fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn))) { // We are guaranteed to have a zero initialization in the prolog or a // dominating explicit zero initialization and the local hasn't been redefined // between the prolog and this explicit zero initialization so the assignment // can be safely removed. if (tree == stmt->GetRootNode()) { fgRemoveStmt(block, stmt); removedExplicitZeroInit = true; lclDsc->lvSuppressedZeroInit = 1; if (lclDsc->lvTracked) { removedTrackedDefs = true; unsigned* pDefsCount = defsInBlock.LookupPointer(lclNum); *pDefsCount = (*pDefsCount) - 1; } } } if (isEntire) { BitVecOps::AddElemD(&bitVecTraits, zeroInitLocals, lclNum); } *pRefCount = 0; } } if (!removedExplicitZeroInit && isEntire && (!canThrow || !lclDsc->lvLiveInOutOfHndlr)) { // If compMethodRequiresPInvokeFrame() returns true, lower may later // insert a call to CORINFO_HELP_INIT_PINVOKE_FRAME which is a gc-safe point. if (!lclDsc->HasGCPtr() || (!GetInterruptible() && !hasGCSafePoint && !compMethodRequiresPInvokeFrame())) { // The local hasn't been used and won't be reported to the gc between // the prolog and this explicit intialization. Therefore, it doesn't // require zero initialization in the prolog. lclDsc->lvHasExplicitInit = 1; JITDUMP("Marking " FMT_LP " as having an explicit init\n", lclNum); } } break; } default: break; } } stmt = next; } if (removedTrackedDefs) { LclVarRefCounts::KeyIterator iter(defsInBlock.Begin()); LclVarRefCounts::KeyIterator end(defsInBlock.End()); for (; !iter.Equal(end); iter++) { unsigned int lclNum = iter.Get(); if (defsInBlock[lclNum] == 0) { VarSetOps::RemoveElemD(this, block->bbVarDef, lvaGetDesc(lclNum)->lvVarIndex); } } } } for (BasicBlock* block = fgFirstBB; (block != nullptr) && ((block->bbFlags & BBF_MARKED) != 0); block = block->GetUniqueSucc()) { block->bbFlags &= ~BBF_MARKED; } } #ifdef DEBUG //------------------------------------------------------------------------ // optAnyChildNotRemoved: Recursively check the child loops of a loop to see if any of them // are still live (that is, not marked as LPFLG_REMOVED). This check is done when we are // removing a parent, just to notify that there is something odd about leaving a live child. // // Arguments: // loopNum - the loop number to check // bool Compiler::optAnyChildNotRemoved(unsigned loopNum) { assert(loopNum < optLoopCount); // Now recursively mark the children. for (BasicBlock::loopNumber l = optLoopTable[loopNum].lpChild; // l != BasicBlock::NOT_IN_LOOP; // l = optLoopTable[l].lpSibling) { if ((optLoopTable[l].lpFlags & LPFLG_REMOVED) == 0) { return true; } if (optAnyChildNotRemoved(l)) { return true; } } // All children were removed return false; } #endif // DEBUG //------------------------------------------------------------------------ // optMarkLoopRemoved: Mark the specified loop as removed (some optimization, such as unrolling, has made the // loop no longer exist). Note that only the given loop is marked as being removed; if it has any children, // they are not touched (but a warning message is output to the JitDump). 
// // Arguments: // loopNum - the loop number to remove // void Compiler::optMarkLoopRemoved(unsigned loopNum) { JITDUMP("Marking loop " FMT_LP " removed\n", loopNum); assert(loopNum < optLoopCount); LoopDsc& loop = optLoopTable[loopNum]; loop.lpFlags |= LPFLG_REMOVED; #ifdef DEBUG if (optAnyChildNotRemoved(loopNum)) { JITDUMP("Removed loop " FMT_LP " has one or more live children\n", loopNum); } // Note: we can't call `fgDebugCheckLoopTable()` here because if there are live children, it will assert. // Assume the caller is going to fix up the table and `bbNatLoopNum` block annotations before the next time // `fgDebugCheckLoopTable()` is called. #endif // DEBUG }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Optimizer XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif /*****************************************************************************/ void Compiler::optInit() { optLoopsMarked = false; fgHasLoops = false; loopAlignCandidates = 0; /* Initialize the # of tracked loops to 0 */ optLoopCount = 0; optLoopTable = nullptr; optCurLoopEpoch = 0; #ifdef DEBUG loopsAligned = 0; #endif /* Keep track of the number of calls and indirect calls made by this method */ optCallCount = 0; optIndirectCallCount = 0; optNativeCallCount = 0; optAssertionCount = 0; optAssertionDep = nullptr; optCSEstart = BAD_VAR_NUM; optCSEcount = 0; } DataFlow::DataFlow(Compiler* pCompiler) : m_pCompiler(pCompiler) { } //------------------------------------------------------------------------ // optSetBlockWeights: adjust block weights, as follows: // 1. A block that is not reachable from the entry block is marked "run rarely". // 2. If we're not using profile weights, then any block with a non-zero weight // that doesn't dominate all the return blocks has its weight dropped in half // (but only if the first block *does* dominate all the returns). // // Notes: // Depends on dominators, and fgReturnBlocks being set. // PhaseStatus Compiler::optSetBlockWeights() { noway_assert(opts.OptimizationEnabled()); assert(fgDomsComputed); assert(fgReturnBlocksComputed); #ifdef DEBUG bool changed = false; #endif bool firstBBDominatesAllReturns = true; const bool usingProfileWeights = fgIsUsingProfileWeights(); for (BasicBlock* const block : Blocks()) { /* Blocks that can't be reached via the first block are rarely executed */ if (!fgReachable(fgFirstBB, block)) { block->bbSetRunRarely(); } if (!usingProfileWeights && firstBBDominatesAllReturns) { // If the weight is already zero (and thus rarely run), there's no point scaling it. if (block->bbWeight != BB_ZERO_WEIGHT) { // If the block dominates all return blocks, leave the weight alone. Otherwise, // scale the weight by 0.5 as a heuristic that some other path gets some of the dynamic flow. // Note that `optScaleLoopBlocks` has a similar heuristic for loop blocks that don't dominate // their loop back edge. bool blockDominatesAllReturns = true; // Assume that we will dominate for (BasicBlockList* retBlocks = fgReturnBlocks; retBlocks != nullptr; retBlocks = retBlocks->next) { if (!fgDominate(block, retBlocks->block)) { blockDominatesAllReturns = false; break; } } if (block == fgFirstBB) { firstBBDominatesAllReturns = blockDominatesAllReturns; // Don't scale the weight of the first block, since it is guaranteed to execute. // If the first block does not dominate all the returns, we won't scale any of the function's // block weights. 
} else { // If we are not using profile weight then we lower the weight // of blocks that do not dominate a return block // if (!blockDominatesAllReturns) { INDEBUG(changed = true); // TODO-Cleanup: we should use: // block->scaleBBWeight(0.5); // since we are inheriting "from ourselves", but that leads to asm diffs due to minutely // different floating-point value in the calculation, and some code that compares weights // for equality. block->inheritWeightPercentage(block, 50); } } } } } #if DEBUG if (changed && verbose) { printf("\nAfter optSetBlockWeights:\n"); fgDispBasicBlocks(); printf("\n"); } /* Check that the flowgraph data (bbNum, bbRefs, bbPreds) is up-to-date */ fgDebugCheckBBlist(); #endif return PhaseStatus::MODIFIED_EVERYTHING; } //------------------------------------------------------------------------ // optScaleLoopBlocks: Scale the weight of loop blocks from 'begBlk' to 'endBlk'. // // Arguments: // begBlk - first block of range. Must be marked as a loop head (BBF_LOOP_HEAD). // endBlk - last block of range (inclusive). Must be reachable from `begBlk`. // // Operation: // Calculate the 'loop weight'. This is the amount to scale the weight of each block in the loop. // Our heuristic is that loops are weighted eight times more than straight-line code // (scale factor is BB_LOOP_WEIGHT_SCALE). If the loops are all properly formed this gives us these weights: // // 1 -- non-loop basic block // 8 -- single loop nesting // 64 -- double loop nesting // 512 -- triple loop nesting // void Compiler::optScaleLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk) { noway_assert(begBlk->bbNum <= endBlk->bbNum); noway_assert(begBlk->isLoopHead()); noway_assert(fgReachable(begBlk, endBlk)); noway_assert(!opts.MinOpts()); #ifdef DEBUG if (verbose) { printf("\nMarking a loop from " FMT_BB " to " FMT_BB, begBlk->bbNum, endBlk->bbNum); } #endif // Build list of back edges for block begBlk. flowList* backedgeList = nullptr; for (BasicBlock* const predBlock : begBlk->PredBlocks()) { // Is this a back edge? if (predBlock->bbNum >= begBlk->bbNum) { backedgeList = new (this, CMK_FlowList) flowList(predBlock, backedgeList); #if MEASURE_BLOCK_SIZE genFlowNodeCnt += 1; genFlowNodeSize += sizeof(flowList); #endif // MEASURE_BLOCK_SIZE } } // At least one backedge must have been found (the one from endBlk). noway_assert(backedgeList); auto reportBlockWeight = [&](BasicBlock* blk, const char* message) { #ifdef DEBUG if (verbose) { printf("\n " FMT_BB "(wt=" FMT_WT ")%s", blk->bbNum, blk->getBBWeight(this), message); } #endif // DEBUG }; for (BasicBlock* const curBlk : BasicBlockRangeList(begBlk, endBlk)) { // Don't change the block weight if it came from profile data. if (curBlk->hasProfileWeight()) { reportBlockWeight(curBlk, "; unchanged: has profile weight"); continue; } // Don't change the block weight if it's known to be rarely run. if (curBlk->isRunRarely()) { reportBlockWeight(curBlk, "; unchanged: run rarely"); continue; } // For curBlk to be part of a loop that starts at begBlk, curBlk must be reachable from begBlk and // (since this is a loop) begBlk must likewise be reachable from curBlk. if (fgReachable(curBlk, begBlk) && fgReachable(begBlk, curBlk)) { // If `curBlk` reaches any of the back edge blocks we set `reachable`. // If `curBlk` dominates any of the back edge blocks we set `dominates`. 
bool reachable = false; bool dominates = false; for (flowList* tmp = backedgeList; tmp != nullptr; tmp = tmp->flNext) { BasicBlock* backedge = tmp->getBlock(); reachable |= fgReachable(curBlk, backedge); dominates |= fgDominate(curBlk, backedge); if (dominates && reachable) { // No need to keep looking; we've already found all the info we need. break; } } if (reachable) { // If the block has BB_ZERO_WEIGHT, then it should be marked as rarely run, and skipped, above. noway_assert(curBlk->bbWeight > BB_ZERO_WEIGHT); weight_t scale = BB_LOOP_WEIGHT_SCALE; if (!dominates) { // If `curBlk` reaches but doesn't dominate any back edge to `endBlk` then there must be at least // some other path to `endBlk`, so don't give `curBlk` all the execution weight. scale = scale / 2; } curBlk->scaleBBWeight(scale); reportBlockWeight(curBlk, ""); } else { reportBlockWeight(curBlk, "; unchanged: back edge unreachable"); } } else { reportBlockWeight(curBlk, "; unchanged: block not in loop"); } } } //------------------------------------------------------------------------ // optUnmarkLoopBlocks: Unmark the blocks between 'begBlk' and 'endBlk' as part of a loop. // // Arguments: // begBlk - first block of range. Must be marked as a loop head (BBF_LOOP_HEAD). // endBlk - last block of range (inclusive). Must be reachable from `begBlk`. // // Operation: // A set of blocks that were previously marked as a loop are now to be unmarked, since we have decided that // for some reason this loop no longer exists. Basically we are just resetting the blocks bbWeight to their // previous values. // void Compiler::optUnmarkLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk) { noway_assert(begBlk->bbNum <= endBlk->bbNum); noway_assert(begBlk->isLoopHead()); noway_assert(!opts.MinOpts()); unsigned backEdgeCount = 0; for (BasicBlock* const predBlock : begBlk->PredBlocks()) { // Is this a backward edge? (from predBlock to begBlk) if (begBlk->bbNum > predBlock->bbNum) { continue; } // We only consider back-edges that are BBJ_COND or BBJ_ALWAYS for loops. if (!predBlock->KindIs(BBJ_COND, BBJ_ALWAYS)) { continue; } backEdgeCount++; } // Only unmark the loop blocks if we have exactly one loop back edge. if (backEdgeCount != 1) { #ifdef DEBUG if (verbose) { if (backEdgeCount > 0) { printf("\nNot removing loop at " FMT_BB ", due to an additional back edge", begBlk->bbNum); } else if (backEdgeCount == 0) { printf("\nNot removing loop at " FMT_BB ", due to no back edge", begBlk->bbNum); } } #endif return; } noway_assert(fgReachable(begBlk, endBlk)); #ifdef DEBUG if (verbose) { printf("\nUnmarking a loop from " FMT_BB " to " FMT_BB, begBlk->bbNum, endBlk->bbNum); } #endif for (BasicBlock* const curBlk : BasicBlockRangeList(begBlk, endBlk)) { // Stop if we go past the last block in the loop, as it may have been deleted. if (curBlk->bbNum > endBlk->bbNum) { break; } // Don't change the block weight if it's known to be rarely run. if (curBlk->isRunRarely()) { continue; } // Don't change the block weight if it came from profile data. if (curBlk->hasProfileWeight()) { continue; } // Don't unmark blocks that are maximum weight. if (curBlk->isMaxBBWeight()) { continue; } // For curBlk to be part of a loop that starts at begBlk, curBlk must be reachable from begBlk and // (since this is a loop) begBlk must likewise be reachable from curBlk. 
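        // The scaling below is intended to invert the scaling applied when the loop was marked: block
        // weights were multiplied by BB_LOOP_WEIGHT_SCALE (halved for blocks that don't dominate a back
        // edge), so here they are multiplied by 1 / BB_LOOP_WEIGHT_SCALE (doubled for blocks that don't
        // dominate endBlk). Illustrative arithmetic only: with BB_LOOP_WEIGHT_SCALE == 8, a weight of 8
        // returns to 1, and a non-dominating weight of 4 also returns to 1 (4 * 1/8 * 2).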
// if (fgReachable(curBlk, begBlk) && fgReachable(begBlk, curBlk)) { weight_t scale = 1.0 / BB_LOOP_WEIGHT_SCALE; if (!fgDominate(curBlk, endBlk)) { scale *= 2; } curBlk->scaleBBWeight(scale); JITDUMP("\n " FMT_BB "(wt=" FMT_WT ")", curBlk->bbNum, curBlk->getBBWeight(this)); } } JITDUMP("\n"); begBlk->unmarkLoopAlign(this DEBUG_ARG("Removed loop")); } /***************************************************************************************************** * * Function called to update the loop table and bbWeight before removing a block */ void Compiler::optUpdateLoopsBeforeRemoveBlock(BasicBlock* block, bool skipUnmarkLoop) { if (!optLoopsMarked) { return; } noway_assert(!opts.MinOpts()); bool removeLoop = false; // If an unreachable block is a loop entry or bottom then the loop is unreachable. // Special case: the block was the head of a loop - or pointing to a loop entry. for (unsigned loopNum = 0; loopNum < optLoopCount; loopNum++) { LoopDsc& loop = optLoopTable[loopNum]; // Some loops may have been already removed by loop unrolling or conditional folding. if (loop.lpFlags & LPFLG_REMOVED) { continue; } // Avoid printing to the JitDump unless we're actually going to change something. // If we call reportBefore, then we're going to change the loop table, and we should print the // `reportAfter` info as well. Only print the `reportBefore` info once, if multiple changes to // the table are made. INDEBUG(bool reportedBefore = false); auto reportBefore = [&]() { #ifdef DEBUG if (verbose && !reportedBefore) { printf("optUpdateLoopsBeforeRemoveBlock " FMT_BB " Before: ", block->bbNum); optPrintLoopInfo(loopNum); printf("\n"); reportedBefore = true; } #endif // DEBUG }; auto reportAfter = [&]() { #ifdef DEBUG if (verbose && reportedBefore) { printf("optUpdateLoopsBeforeRemoveBlock " FMT_BB " After: ", block->bbNum); optPrintLoopInfo(loopNum); printf("\n"); } #endif // DEBUG }; if (block == loop.lpEntry || block == loop.lpBottom) { reportBefore(); optMarkLoopRemoved(loopNum); reportAfter(); continue; } // If the loop is still in the table any block in the loop must be reachable. noway_assert((loop.lpEntry != block) && (loop.lpBottom != block)); if (loop.lpExit == block) { reportBefore(); assert(loop.lpExitCnt == 1); --loop.lpExitCnt; loop.lpExit = nullptr; } // If `block` flows to the loop entry then the whole loop will become unreachable if it is the // only non-loop predecessor. switch (block->bbJumpKind) { case BBJ_NONE: if (block->bbNext == loop.lpEntry) { removeLoop = true; } break; case BBJ_COND: if ((block->bbNext == loop.lpEntry) || (block->bbJumpDest == loop.lpEntry)) { removeLoop = true; } break; case BBJ_ALWAYS: if (block->bbJumpDest == loop.lpEntry) { removeLoop = true; } break; case BBJ_SWITCH: for (BasicBlock* const bTarget : block->SwitchTargets()) { if (bTarget == loop.lpEntry) { removeLoop = true; break; } } break; default: break; } if (removeLoop) { // Check if the entry has other predecessors outside the loop. // TODO: Replace this when predecessors are available. for (BasicBlock* const auxBlock : Blocks()) { // Ignore blocks in the loop. 
if (loop.lpContains(auxBlock)) { continue; } switch (auxBlock->bbJumpKind) { case BBJ_NONE: if (auxBlock->bbNext == loop.lpEntry) { removeLoop = false; } break; case BBJ_COND: if ((auxBlock->bbNext == loop.lpEntry) || (auxBlock->bbJumpDest == loop.lpEntry)) { removeLoop = false; } break; case BBJ_ALWAYS: if (auxBlock->bbJumpDest == loop.lpEntry) { removeLoop = false; } break; case BBJ_SWITCH: for (BasicBlock* const bTarget : auxBlock->SwitchTargets()) { if (bTarget == loop.lpEntry) { removeLoop = false; break; } } break; default: break; } } if (removeLoop) { reportBefore(); optMarkLoopRemoved(loopNum); } } else if (loop.lpHead == block) { reportBefore(); /* The loop has a new head - Just update the loop table */ loop.lpHead = block->bbPrev; } reportAfter(); } if ((skipUnmarkLoop == false) && // block->KindIs(BBJ_ALWAYS, BBJ_COND) && // block->bbJumpDest->isLoopHead() && // (block->bbJumpDest->bbNum <= block->bbNum) && // fgDomsComputed && // (fgCurBBEpochSize == fgDomBBcount + 1) && // fgReachable(block->bbJumpDest, block)) { optUnmarkLoopBlocks(block->bbJumpDest, block); } } //------------------------------------------------------------------------ // optClearLoopIterInfo: Clear the info related to LPFLG_ITER loops in the loop table. // The various fields related to iterators is known to be valid for loop cloning and unrolling, // but becomes invalid afterwards. Clear the info that might be used incorrectly afterwards // in JitDump or by subsequent phases. // void Compiler::optClearLoopIterInfo() { for (unsigned lnum = 0; lnum < optLoopCount; lnum++) { LoopDsc& loop = optLoopTable[lnum]; loop.lpFlags &= ~(LPFLG_ITER | LPFLG_CONST_INIT | LPFLG_SIMD_LIMIT | LPFLG_VAR_LIMIT | LPFLG_CONST_LIMIT | LPFLG_ARRLEN_LIMIT); loop.lpIterTree = nullptr; loop.lpInitBlock = nullptr; loop.lpConstInit = -1; loop.lpTestTree = nullptr; } } #ifdef DEBUG /***************************************************************************** * * Print loop info in an uniform way. */ void Compiler::optPrintLoopInfo(const LoopDsc* loop, bool printVerbose /* = false */) { assert(optLoopTable != nullptr); assert((&optLoopTable[0] <= loop) && (loop < &optLoopTable[optLoopCount])); unsigned lnum = (unsigned)(loop - optLoopTable); assert(lnum < optLoopCount); assert(&optLoopTable[lnum] == loop); if (loop->lpFlags & LPFLG_REMOVED) { // If a loop has been removed, it might be dangerous to print its fields (e.g., loop unrolling // nulls out the lpHead field). printf(FMT_LP " REMOVED", lnum); return; } printf(FMT_LP ", from " FMT_BB " to " FMT_BB " (Head=" FMT_BB ", Entry=" FMT_BB, lnum, loop->lpTop->bbNum, loop->lpBottom->bbNum, loop->lpHead->bbNum, loop->lpEntry->bbNum); if (loop->lpExitCnt == 1) { printf(", Exit=" FMT_BB, loop->lpExit->bbNum); } else { printf(", ExitCnt=%d", loop->lpExitCnt); } if (loop->lpParent != BasicBlock::NOT_IN_LOOP) { printf(", parent=" FMT_LP, loop->lpParent); } printf(")"); if (printVerbose) { if (loop->lpChild != BasicBlock::NOT_IN_LOOP) { printf(", child loop = " FMT_LP, loop->lpChild); } if (loop->lpSibling != BasicBlock::NOT_IN_LOOP) { printf(", sibling loop = " FMT_LP, loop->lpSibling); } // If an iterator loop print the iterator and the initialization. 
if (loop->lpFlags & LPFLG_ITER) { printf(" [over V%02u", loop->lpIterVar()); printf(" ("); printf(GenTree::OpName(loop->lpIterOper())); printf(" %d)", loop->lpIterConst()); if (loop->lpFlags & LPFLG_CONST_INIT) { printf(" from %d", loop->lpConstInit); } if (loop->lpFlags & LPFLG_CONST_INIT) { if (loop->lpInitBlock != loop->lpHead) { printf(" (in " FMT_BB ")", loop->lpInitBlock->bbNum); } } // If a simple test condition print operator and the limits */ printf(" %s", GenTree::OpName(loop->lpTestOper())); if (loop->lpFlags & LPFLG_CONST_LIMIT) { printf(" %d", loop->lpConstLimit()); if (loop->lpFlags & LPFLG_SIMD_LIMIT) { printf(" (simd)"); } } if (loop->lpFlags & LPFLG_VAR_LIMIT) { printf(" V%02u", loop->lpVarLimit()); } if (loop->lpFlags & LPFLG_ARRLEN_LIMIT) { ArrIndex* index = new (getAllocator(CMK_DebugOnly)) ArrIndex(getAllocator(CMK_DebugOnly)); if (loop->lpArrLenLimit(this, index)) { printf(" "); index->Print(); printf(".Length"); } else { printf(" ???.Length"); } } printf("]"); } // Print the flags if (loop->lpFlags & LPFLG_CONTAINS_CALL) { printf(" call"); } if (loop->lpFlags & LPFLG_HAS_PREHEAD) { printf(" prehead"); } if (loop->lpFlags & LPFLG_DONT_UNROLL) { printf(" !unroll"); } if (loop->lpFlags & LPFLG_ASGVARS_YES) { printf(" avyes"); } if (loop->lpFlags & LPFLG_ASGVARS_INC) { printf(" avinc"); } } } void Compiler::optPrintLoopInfo(unsigned lnum, bool printVerbose /* = false */) { assert(lnum < optLoopCount); const LoopDsc& loop = optLoopTable[lnum]; optPrintLoopInfo(&loop, printVerbose); } //------------------------------------------------------------------------ // optPrintLoopTable: Print the loop table // void Compiler::optPrintLoopTable() { printf("\n*************** Natural loop table\n"); if (optLoopCount == 0) { printf("No loops\n"); } else { for (unsigned loopInd = 0; loopInd < optLoopCount; loopInd++) { optPrintLoopInfo(loopInd, /* verbose */ true); printf("\n"); } } printf("\n"); } #endif // DEBUG //------------------------------------------------------------------------ // optPopulateInitInfo: Populate loop init info in the loop table. // We assume the iteration variable is initialized already and check appropriately. // This only checks for the special case of a constant initialization. // // Arguments: // loopInd - loop index // initBlock - block in which the initialization lives. // init - the tree that is supposed to initialize the loop iterator. Might be nullptr. // iterVar - loop iteration variable. // // Return Value: // "true" if a constant initializer was found. // // Operation: // The 'init' tree is checked if its lhs is a local and rhs is a const. // bool Compiler::optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenTree* init, unsigned iterVar) { if (init == nullptr) { return false; } // Operator should be = if (init->gtOper != GT_ASG) { return false; } GenTree* lhs = init->AsOp()->gtOp1; GenTree* rhs = init->AsOp()->gtOp2; // LHS has to be local and should equal iterVar. if ((lhs->gtOper != GT_LCL_VAR) || (lhs->AsLclVarCommon()->GetLclNum() != iterVar)) { return false; } // RHS can be constant or local var. // TODO-CQ: CLONE: Add arr length for descending loops. if ((rhs->gtOper != GT_CNS_INT) || (rhs->TypeGet() != TYP_INT)) { return false; } // We found an initializer in the `head` block. For this to be used, we need to make sure the // "iterVar" initialization is never skipped. That is, every pred of ENTRY other than HEAD is in the loop. 
for (BasicBlock* const predBlock : optLoopTable[loopInd].lpEntry->PredBlocks()) { if ((predBlock != initBlock) && !optLoopTable[loopInd].lpContains(predBlock)) { JITDUMP(FMT_LP ": initialization not guaranteed through `head` block; ignore constant initializer\n", loopInd); return false; } } optLoopTable[loopInd].lpFlags |= LPFLG_CONST_INIT; optLoopTable[loopInd].lpConstInit = (int)rhs->AsIntCon()->gtIconVal; optLoopTable[loopInd].lpInitBlock = initBlock; return true; } //---------------------------------------------------------------------------------- // optCheckIterInLoopTest: Check if iter var is used in loop test. // // Arguments: // test "jtrue" tree or an asg of the loop iter termination condition // from/to blocks (beg, end) which are part of the loop. // iterVar loop iteration variable. // loopInd loop index. // // Operation: // The test tree is parsed to check if "iterVar" matches the lhs of the condition // and the rhs limit is extracted from the "test" tree. The limit information is // added to the loop table. // // Return Value: // "false" if the loop table could not be populated with the loop test info or // if the test condition doesn't involve iterVar. // bool Compiler::optCheckIterInLoopTest( unsigned loopInd, GenTree* test, BasicBlock* from, BasicBlock* to, unsigned iterVar) { // Obtain the relop from the "test" tree. GenTree* relop; if (test->gtOper == GT_JTRUE) { relop = test->gtGetOp1(); } else { assert(test->gtOper == GT_ASG); relop = test->gtGetOp2(); } noway_assert(relop->OperIsCompare()); GenTree* opr1 = relop->AsOp()->gtOp1; GenTree* opr2 = relop->AsOp()->gtOp2; GenTree* iterOp; GenTree* limitOp; // Make sure op1 or op2 is the iterVar. if (opr1->gtOper == GT_LCL_VAR && opr1->AsLclVarCommon()->GetLclNum() == iterVar) { iterOp = opr1; limitOp = opr2; } else if (opr2->gtOper == GT_LCL_VAR && opr2->AsLclVarCommon()->GetLclNum() == iterVar) { iterOp = opr2; limitOp = opr1; } else { return false; } if (iterOp->gtType != TYP_INT) { return false; } // Mark the iterator node. iterOp->gtFlags |= GTF_VAR_ITERATOR; // Check what type of limit we have - constant, variable or arr-len. if (limitOp->gtOper == GT_CNS_INT) { optLoopTable[loopInd].lpFlags |= LPFLG_CONST_LIMIT; if ((limitOp->gtFlags & GTF_ICON_SIMD_COUNT) != 0) { optLoopTable[loopInd].lpFlags |= LPFLG_SIMD_LIMIT; } } else if (limitOp->gtOper == GT_LCL_VAR && !optIsVarAssigned(from, to, nullptr, limitOp->AsLclVarCommon()->GetLclNum())) { optLoopTable[loopInd].lpFlags |= LPFLG_VAR_LIMIT; } else if (limitOp->gtOper == GT_ARR_LENGTH) { optLoopTable[loopInd].lpFlags |= LPFLG_ARRLEN_LIMIT; } else { return false; } // Save the type of the comparison between the iterator and the limit. optLoopTable[loopInd].lpTestTree = relop; return true; } //---------------------------------------------------------------------------------- // optIsLoopIncrTree: Check if loop is a tree of form v += 1 or v = v + 1 // // Arguments: // incr The incr tree to be checked. Whether incr tree is // oper-equal(+=, -=...) type nodes or v=v+1 type ASG nodes. // // Operation: // The test tree is parsed to check if "iterVar" matches the lhs of the condition // and the rhs limit is extracted from the "test" tree. The limit information is // added to the loop table. // // Return Value: // iterVar local num if the iterVar is found, otherwise BAD_VAR_NUM. 
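//
// Example:
//    Illustrative increment trees that are accepted (hypothetical, shown as source): "i += 2", "i = i - 1",
//    "i <<= 1" -- i.e. an ADD/SUB/MUL/RSH/LSH update of the iteration variable by a TYP_INT constant.
//    A variable increment such as "i += step" is rejected (see the TODO-CQ below).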
// unsigned Compiler::optIsLoopIncrTree(GenTree* incr) { GenTree* incrVal; genTreeOps updateOper; unsigned iterVar = incr->IsLclVarUpdateTree(&incrVal, &updateOper); if (iterVar != BAD_VAR_NUM) { // We have v = v op y type asg node. switch (updateOper) { case GT_ADD: case GT_SUB: case GT_MUL: case GT_RSH: case GT_LSH: break; default: return BAD_VAR_NUM; } // Increment should be by a const int. // TODO-CQ: CLONE: allow variable increments. if ((incrVal->gtOper != GT_CNS_INT) || (incrVal->TypeGet() != TYP_INT)) { return BAD_VAR_NUM; } } return iterVar; } //---------------------------------------------------------------------------------- // optComputeIterInfo: Check tree is loop increment of a lcl that is loop-invariant. // // Arguments: // from, to - are blocks (beg, end) which are part of the loop. // incr - tree that increments the loop iterator. v+=1 or v=v+1. // pIterVar - see return value. // // Return Value: // Returns true if iterVar "v" can be returned in "pIterVar", otherwise returns // false. // // Operation: // Check if the "incr" tree is a "v=v+1 or v+=1" type tree and make sure it is not // assigned in the loop. // bool Compiler::optComputeIterInfo(GenTree* incr, BasicBlock* from, BasicBlock* to, unsigned* pIterVar) { unsigned iterVar = optIsLoopIncrTree(incr); if (iterVar == BAD_VAR_NUM) { return false; } if (optIsVarAssigned(from, to, incr, iterVar)) { JITDUMP("iterVar is assigned in loop\n"); return false; } *pIterVar = iterVar; return true; } //---------------------------------------------------------------------------------- // optIsLoopTestEvalIntoTemp: // Pattern match if the test tree is computed into a tmp // and the "tmp" is used as jump condition for loop termination. // // Arguments: // testStmt - is the JTRUE statement that is of the form: jmpTrue (Vtmp != 0) // where Vtmp contains the actual loop test result. // newTestStmt - contains the statement that is the actual test stmt involving // the loop iterator. // // Return Value: // Returns true if a new test tree can be obtained. // // Operation: // Scan if the current stmt is a jtrue with (Vtmp != 0) as condition // Then returns the rhs for def of Vtmp as the "test" node. // // Note: // This method just retrieves what it thinks is the "test" node, // the callers are expected to verify that "iterVar" is used in the test. // bool Compiler::optIsLoopTestEvalIntoTemp(Statement* testStmt, Statement** newTestStmt) { GenTree* test = testStmt->GetRootNode(); if (test->gtOper != GT_JTRUE) { return false; } GenTree* relop = test->gtGetOp1(); noway_assert(relop->OperIsCompare()); GenTree* opr1 = relop->AsOp()->gtOp1; GenTree* opr2 = relop->AsOp()->gtOp2; // Make sure we have jtrue (vtmp != 0) if ((relop->OperGet() == GT_NE) && (opr1->OperGet() == GT_LCL_VAR) && (opr2->OperGet() == GT_CNS_INT) && opr2->IsIntegralConst(0)) { // Get the previous statement to get the def (rhs) of Vtmp to see // if the "test" is evaluated into Vtmp. Statement* prevStmt = testStmt->GetPrevStmt(); if (prevStmt == nullptr) { return false; } GenTree* tree = prevStmt->GetRootNode(); if (tree->OperGet() == GT_ASG) { GenTree* lhs = tree->AsOp()->gtOp1; GenTree* rhs = tree->AsOp()->gtOp2; // Return as the new test node. 
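            // (Illustrative shape being matched: a previous statement "tmp = (i < limit)" followed by
            // "jmpTrue(tmp != 0)" -- the local defined here must be the same 'tmp' that the JTRUE tests,
            // and its rhs must be the actual compare.)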
if (lhs->gtOper == GT_LCL_VAR && lhs->AsLclVarCommon()->GetLclNum() == opr1->AsLclVarCommon()->GetLclNum()) { if (rhs->OperIsCompare()) { *newTestStmt = prevStmt; return true; } } } } return false; } //---------------------------------------------------------------------------------- // optExtractInitTestIncr: // Extract the "init", "test" and "incr" nodes of the loop. // // Arguments: // head - Loop head block // bottom - Loop bottom block // top - Loop top block // ppInit - The init stmt of the loop if found. // ppTest - The test stmt of the loop if found. // ppIncr - The incr stmt of the loop if found. // // Return Value: // The results are put in "ppInit", "ppTest" and "ppIncr" if the method // returns true. Returns false if the information can't be extracted. // Extracting the `init` is optional; if one is not found, *ppInit is set // to nullptr. Return value will never be false if `init` is not found. // // Operation: // Check if the "test" stmt is last stmt in the loop "bottom". If found good, // "test" stmt is found. Try to find the "incr" stmt. Check previous stmt of // "test" to get the "incr" stmt. If it is not found it could be a loop of the // below form. // // +-------<-----------------<-----------+ // | | // v | // BBinit(head) -> BBcond(top) -> BBLoopBody(bottom) ---^ // // Check if the "incr" tree is present in the loop "top" node as the last stmt. // Also check if the "test" tree is assigned to a tmp node and the tmp is used // in the jtrue condition. // // Note: // This method just retrieves what it thinks is the "test" node, // the callers are expected to verify that "iterVar" is used in the test. // bool Compiler::optExtractInitTestIncr( BasicBlock* head, BasicBlock* bottom, BasicBlock* top, GenTree** ppInit, GenTree** ppTest, GenTree** ppIncr) { assert(ppInit != nullptr); assert(ppTest != nullptr); assert(ppIncr != nullptr); // Check if last two statements in the loop body are the increment of the iterator // and the loop termination test. noway_assert(bottom->bbStmtList != nullptr); Statement* testStmt = bottom->lastStmt(); noway_assert(testStmt != nullptr && testStmt->GetNextStmt() == nullptr); Statement* newTestStmt; if (optIsLoopTestEvalIntoTemp(testStmt, &newTestStmt)) { testStmt = newTestStmt; } // Check if we have the incr stmt before the test stmt, if we don't, // check if incr is part of the loop "top". Statement* incrStmt = testStmt->GetPrevStmt(); if (incrStmt == nullptr || optIsLoopIncrTree(incrStmt->GetRootNode()) == BAD_VAR_NUM) { if (top == nullptr || top->bbStmtList == nullptr || top->bbStmtList->GetPrevStmt() == nullptr) { return false; } // If the prev stmt to loop test is not incr, then check if we have loop test evaluated into a tmp. Statement* toplastStmt = top->lastStmt(); if (optIsLoopIncrTree(toplastStmt->GetRootNode()) != BAD_VAR_NUM) { incrStmt = toplastStmt; } else { return false; } } assert(testStmt != incrStmt); // Find the last statement in the loop pre-header which we expect to be the initialization of // the loop iterator. Statement* phdrStmt = head->firstStmt(); if (phdrStmt != nullptr) { Statement* initStmt = phdrStmt->GetPrevStmt(); noway_assert(initStmt != nullptr && (initStmt->GetNextStmt() == nullptr)); // If it is a duplicated loop condition, skip it. if (initStmt->GetRootNode()->OperIs(GT_JTRUE)) { bool doGetPrev = true; #ifdef DEBUG if (opts.optRepeat) { // Previous optimization passes may have inserted compiler-generated // statements other than duplicated loop conditions. 
doGetPrev = (initStmt->GetPrevStmt() != nullptr); } else { // Must be a duplicated loop condition. noway_assert(initStmt->GetRootNode()->gtOper == GT_JTRUE); } #endif // DEBUG if (doGetPrev) { initStmt = initStmt->GetPrevStmt(); } noway_assert(initStmt != nullptr); } *ppInit = initStmt->GetRootNode(); } else { *ppInit = nullptr; } *ppTest = testStmt->GetRootNode(); *ppIncr = incrStmt->GetRootNode(); return true; } /***************************************************************************** * * Record the loop in the loop table. Return true if successful, false if * out of entries in loop table. */ bool Compiler::optRecordLoop( BasicBlock* head, BasicBlock* top, BasicBlock* entry, BasicBlock* bottom, BasicBlock* exit, unsigned char exitCnt) { if (exitCnt == 1) { noway_assert(exit != nullptr); } // Record this loop in the table, if there's room. assert(optLoopCount <= BasicBlock::MAX_LOOP_NUM); if (optLoopCount == BasicBlock::MAX_LOOP_NUM) { #if COUNT_LOOPS loopOverflowThisMethod = true; #endif return false; } // Assumed preconditions on the loop we're adding. assert(top->bbNum <= entry->bbNum); assert(entry->bbNum <= bottom->bbNum); assert(head->bbNum < top->bbNum || head->bbNum > bottom->bbNum); unsigned char loopInd = optLoopCount; if (optLoopTable == nullptr) { assert(loopInd == 0); optLoopTable = getAllocator(CMK_LoopOpt).allocate<LoopDsc>(BasicBlock::MAX_LOOP_NUM); NewLoopEpoch(); } else { // If the new loop contains any existing ones, add it in the right place. for (unsigned char prevPlus1 = optLoopCount; prevPlus1 > 0; prevPlus1--) { unsigned char prev = prevPlus1 - 1; if (optLoopTable[prev].lpContainedBy(top, bottom)) { loopInd = prev; } } // Move up any loops if necessary. for (unsigned j = optLoopCount; j > loopInd; j--) { optLoopTable[j] = optLoopTable[j - 1]; } } #ifdef DEBUG for (unsigned i = loopInd + 1; i < optLoopCount; i++) { // The loop is well-formed. assert(optLoopTable[i].lpWellFormed()); // Check for disjoint. if (optLoopTable[i].lpDisjoint(top, bottom)) { continue; } // Otherwise, assert complete containment (of optLoopTable[i] in new loop). assert(optLoopTable[i].lpContainedBy(top, bottom)); } #endif // DEBUG optLoopTable[loopInd].lpHead = head; optLoopTable[loopInd].lpTop = top; optLoopTable[loopInd].lpBottom = bottom; optLoopTable[loopInd].lpEntry = entry; optLoopTable[loopInd].lpExit = exit; optLoopTable[loopInd].lpExitCnt = exitCnt; optLoopTable[loopInd].lpParent = BasicBlock::NOT_IN_LOOP; optLoopTable[loopInd].lpChild = BasicBlock::NOT_IN_LOOP; optLoopTable[loopInd].lpSibling = BasicBlock::NOT_IN_LOOP; optLoopTable[loopInd].lpAsgVars = AllVarSetOps::UninitVal(); optLoopTable[loopInd].lpFlags = LPFLG_EMPTY; // We haven't yet recorded any side effects. for (MemoryKind memoryKind : allMemoryKinds()) { optLoopTable[loopInd].lpLoopHasMemoryHavoc[memoryKind] = false; } optLoopTable[loopInd].lpFieldsModified = nullptr; optLoopTable[loopInd].lpArrayElemTypesModified = nullptr; // // Try to find loops that have an iterator (i.e. for-like loops) "for (init; test; incr){ ... }" // We have the following restrictions: // 1. The loop condition must be a simple one i.e. only one JTRUE node // 2. There must be a loop iterator (a local var) that is // incremented (decremented or lsh, rsh, mul) with a constant value // 3. The iterator is incremented exactly once // 4. The loop condition must use the iterator. // 5. Finding a constant initializer is optional; if the initializer is not found, or is not constant, // it is still considered a for-like loop. 
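    //    For example (hypothetical source): "for (int i = 0; i < limit; i++) { ... }" -- 'i' is the
    //    iterator, it is incremented exactly once by a constant, and the loop test "i < limit" uses it.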
// if (bottom->bbJumpKind == BBJ_COND) { GenTree* init; GenTree* test; GenTree* incr; if (!optExtractInitTestIncr(head, bottom, top, &init, &test, &incr)) { JITDUMP(FMT_LP ": couldn't find init/test/incr; not LPFLG_ITER loop\n", loopInd); goto DONE_LOOP; } unsigned iterVar = BAD_VAR_NUM; if (!optComputeIterInfo(incr, head->bbNext, bottom, &iterVar)) { JITDUMP(FMT_LP ": increment expression not appropriate form, or not loop invariant; not LPFLG_ITER loop\n", loopInd); goto DONE_LOOP; } optPopulateInitInfo(loopInd, head, init, iterVar); // Check that the iterator is used in the loop condition. if (!optCheckIterInLoopTest(loopInd, test, head->bbNext, bottom, iterVar)) { JITDUMP(FMT_LP ": iterator not used in loop condition; not LPFLG_ITER loop\n", loopInd); goto DONE_LOOP; } // We know the loop has an iterator at this point; flag it as LPFLG_ITER. JITDUMP(FMT_LP ": setting LPFLG_ITER\n", loopInd); optLoopTable[loopInd].lpFlags |= LPFLG_ITER; // Record iterator. optLoopTable[loopInd].lpIterTree = incr; #if COUNT_LOOPS iterLoopCount++; // Check if a constant iteration loop. if ((optLoopTable[loopInd].lpFlags & LPFLG_CONST_INIT) && (optLoopTable[loopInd].lpFlags & LPFLG_CONST_LIMIT)) { // This is a constant loop. constIterLoopCount++; } #endif } DONE_LOOP: bool loopInsertedAtEnd = (loopInd == optLoopCount); optLoopCount++; #ifdef DEBUG if (verbose) { printf("Recorded loop %s", loopInsertedAtEnd ? "" : "(extended) "); optPrintLoopInfo(loopInd, /* verbose */ true); printf("\n"); } #endif // DEBUG return true; } #ifdef DEBUG void Compiler::optCheckPreds() { for (BasicBlock* const block : Blocks()) { for (BasicBlock* const predBlock : block->PredBlocks()) { // make sure this pred is part of the BB list BasicBlock* bb; for (bb = fgFirstBB; bb; bb = bb->bbNext) { if (bb == predBlock) { break; } } noway_assert(bb); switch (bb->bbJumpKind) { case BBJ_COND: if (bb->bbJumpDest == block) { break; } FALLTHROUGH; case BBJ_NONE: noway_assert(bb->bbNext == block); break; case BBJ_EHFILTERRET: case BBJ_ALWAYS: case BBJ_EHCATCHRET: noway_assert(bb->bbJumpDest == block); break; default: break; } } } } #endif // DEBUG namespace { //------------------------------------------------------------------------ // LoopSearch: Class that handles scanning a range of blocks to detect a loop, // moving blocks to make the loop body contiguous, and recording the loop. // // We will use the following terminology: // HEAD - the basic block that flows into the loop ENTRY block (Currently MUST be lexically before entry). // Not part of the looping of the loop. // TOP - the target of the backward edge from BOTTOM, and the lexically first basic block (in bbNext order) // within this loop. // BOTTOM - the lexically last block in the loop (i.e. the block from which we jump to the top) // EXIT - the predecessor of loop's unique exit edge, if it has a unique exit edge; else nullptr // ENTRY - the entry in the loop (not necessarly the TOP), but there must be only one entry // // We (currently) require the body of a loop to be a contiguous (in bbNext order) sequence of basic blocks. // When the loop is identified, blocks will be moved out to make it a compact contiguous region if possible, // and in cases where compaction is not possible, we'll subsequently treat all blocks in the lexical range // between TOP and BOTTOM as part of the loop even if they aren't part of the SCC. 
// Regarding nesting: Since a given block can only have one back-edge (we only detect loops with back-edges // from BBJ_COND or BBJ_ALWAYS blocks), no two loops will share the same BOTTOM. Two loops may share the // same TOP/ENTRY as reported by LoopSearch, and optCanonicalizeLoopNest will subsequently re-write // the CFG so that no two loops share the same TOP/ENTRY anymore. // // | // v // head // | // | top <--+ // | | | // | ... | // | | | // | v | // +---> entry | // | | // ... | // | | // v | // +-- exit/tail | // | | | // | ... | // | | | // | v | // | bottom ---+ // | // +------+ // | // v // class LoopSearch { // Keeping track of which blocks are in the loop requires two block sets since we may add blocks // as we go but the BlockSet type's max ID doesn't increase to accommodate them. Define a helper // struct to make the ensuing code more readable. struct LoopBlockSet { private: // Keep track of blocks with bbNum <= oldBlockMaxNum in a regular BlockSet, since // it can hold all of them. BlockSet oldBlocksInLoop; // Blocks with bbNum <= oldBlockMaxNum // Keep track of blocks with bbNum > oldBlockMaxNum in a separate BlockSet, but // indexing them by (blockNum - oldBlockMaxNum); since we won't generate more than // one new block per old block, this must be sufficient to track any new blocks. BlockSet newBlocksInLoop; // Blocks with bbNum > oldBlockMaxNum Compiler* comp; unsigned int oldBlockMaxNum; public: LoopBlockSet(Compiler* comp) : oldBlocksInLoop(BlockSetOps::UninitVal()) , newBlocksInLoop(BlockSetOps::UninitVal()) , comp(comp) , oldBlockMaxNum(comp->fgBBNumMax) { } void Reset(unsigned int seedBlockNum) { if (BlockSetOps::MayBeUninit(oldBlocksInLoop)) { // Either the block sets are uninitialized (and long), so we need to initialize // them (and allocate their backing storage), or they are short and empty, so // assigning MakeEmpty to them is as cheap as ClearD. oldBlocksInLoop = BlockSetOps::MakeEmpty(comp); newBlocksInLoop = BlockSetOps::MakeEmpty(comp); } else { // We know the backing storage is already allocated, so just clear it. BlockSetOps::ClearD(comp, oldBlocksInLoop); BlockSetOps::ClearD(comp, newBlocksInLoop); } assert(seedBlockNum <= oldBlockMaxNum); BlockSetOps::AddElemD(comp, oldBlocksInLoop, seedBlockNum); } bool CanRepresent(unsigned int blockNum) { // We can represent old blocks up to oldBlockMaxNum, and // new blocks up to 2 * oldBlockMaxNum. 
return (blockNum <= 2 * oldBlockMaxNum); } bool IsMember(unsigned int blockNum) { if (blockNum > oldBlockMaxNum) { return BlockSetOps::IsMember(comp, newBlocksInLoop, blockNum - oldBlockMaxNum); } else { return BlockSetOps::IsMember(comp, oldBlocksInLoop, blockNum); } } void Insert(unsigned int blockNum) { if (blockNum > oldBlockMaxNum) { BlockSetOps::AddElemD(comp, newBlocksInLoop, blockNum - oldBlockMaxNum); } else { BlockSetOps::AddElemD(comp, oldBlocksInLoop, blockNum); } } bool TestAndInsert(unsigned int blockNum) { if (blockNum > oldBlockMaxNum) { unsigned int shiftedNum = blockNum - oldBlockMaxNum; if (!BlockSetOps::IsMember(comp, newBlocksInLoop, shiftedNum)) { BlockSetOps::AddElemD(comp, newBlocksInLoop, shiftedNum); return false; } } else { if (!BlockSetOps::IsMember(comp, oldBlocksInLoop, blockNum)) { BlockSetOps::AddElemD(comp, oldBlocksInLoop, blockNum); return false; } } return true; } }; LoopBlockSet loopBlocks; // Set of blocks identified as part of the loop Compiler* comp; // See LoopSearch class comment header for a diagram relating these fields: BasicBlock* head; // Predecessor of unique entry edge BasicBlock* top; // Successor of back-edge from BOTTOM BasicBlock* bottom; // Predecessor of back-edge to TOP, also lexically last in-loop block BasicBlock* entry; // Successor of unique entry edge BasicBlock* lastExit; // Most recently discovered exit block unsigned char exitCount; // Number of discovered exit edges unsigned int oldBlockMaxNum; // Used to identify new blocks created during compaction BlockSet bottomBlocks; // BOTTOM blocks of already-recorded loops #ifdef DEBUG bool forgotExit = false; // Flags a rare case where lastExit gets nulled out, for assertions #endif bool changedFlowGraph = false; // Signals that loop compaction has modified the flow graph public: LoopSearch(Compiler* comp) : loopBlocks(comp), comp(comp), oldBlockMaxNum(comp->fgBBNumMax), bottomBlocks(BlockSetOps::MakeEmpty(comp)) { // Make sure we've renumbered such that the bitsets can hold all the bits assert(comp->fgBBNumMax <= comp->fgCurBBEpochSize); } //------------------------------------------------------------------------ // RecordLoop: Notify the Compiler that a loop has been found. // // Return Value: // true - Loop successfully recorded. // false - Compiler has run out of loop descriptors; loop not recorded. // bool RecordLoop() { // At this point we have a compact loop - record it in the loop table. // If we found only one exit, record it in the table too // (otherwise an exit = nullptr in the loop table means multiple exits). BasicBlock* onlyExit = (exitCount == 1 ? lastExit : nullptr); if (comp->optRecordLoop(head, top, entry, bottom, onlyExit, exitCount)) { // Record the BOTTOM block for future reference before returning. assert(bottom->bbNum <= oldBlockMaxNum); BlockSetOps::AddElemD(comp, bottomBlocks, bottom->bbNum); return true; } // Unable to record this loop because the loop descriptor table overflowed. return false; } //------------------------------------------------------------------------ // ChangedFlowGraph: Determine whether loop compaction has modified the flow graph. // // Return Value: // true - The flow graph has been modified; fgUpdateChangedFlowGraph should // be called (which is the caller's responsibility). // false - The flow graph has not been modified by this LoopSearch. 
// bool ChangedFlowGraph() { return changedFlowGraph; } //------------------------------------------------------------------------ // FindLoop: Search for a loop with the given HEAD block and back-edge. // // Arguments: // head - Block to be the HEAD of any loop identified // top - Block to be the TOP of any loop identified // bottom - Block to be the BOTTOM of any loop identified // // Return Value: // true - Found a valid loop. // false - Did not find a valid loop. // // Notes: // May modify flow graph to make loop compact before returning. // Will set instance fields to track loop's extent and exits if a valid // loop is found, and potentially trash them otherwise. // bool FindLoop(BasicBlock* head, BasicBlock* top, BasicBlock* bottom) { // Is this a loop candidate? - We look for "back edges", i.e. an edge from BOTTOM // to TOP (note that this is an abuse of notation since this is not necessarily a back edge // as the definition says, but merely an indication that we have a loop there). // Thus, we have to be very careful and after entry discovery check that it is indeed // the only place we enter the loop (especially for non-reducible flow graphs). if (top->bbNum > bottom->bbNum) // is this a backward edge? (from BOTTOM to TOP) { // Edge from BOTTOM to TOP is not a backward edge return false; } if (bottom->bbNum > oldBlockMaxNum) { // Not a true back-edge; bottom is a block added to reconnect fall-through during // loop processing, so its block number does not reflect its position. return false; } if (bottom->KindIs(BBJ_EHFINALLYRET, BBJ_EHFILTERRET, BBJ_EHCATCHRET, BBJ_CALLFINALLY, BBJ_SWITCH)) { // BBJ_EHFINALLYRET, BBJ_EHFILTERRET, BBJ_EHCATCHRET, and BBJ_CALLFINALLY can never form a loop. // BBJ_SWITCH that has a backward jump appears only for labeled break. return false; } // The presence of a "back edge" is an indication that a loop might be present here. // // Definition: A loop is: // 1. A collection of STRONGLY CONNECTED nodes i.e. there is a path from any // node in the loop to any other node in the loop (wholly within the loop) // 2. The loop has a unique ENTRY, i.e. there is only one way to reach a node // in the loop from outside the loop, and that is through the ENTRY // Let's find the loop ENTRY BasicBlock* entry = FindEntry(head, top, bottom); if (entry == nullptr) { // For now, we only recognize loops where HEAD has some successor ENTRY in the loop. return false; } // Passed the basic checks; initialize instance state for this back-edge. this->head = head; this->top = top; this->entry = entry; this->bottom = bottom; this->lastExit = nullptr; this->exitCount = 0; if (!HasSingleEntryCycle()) { // There isn't actually a loop between TOP and BOTTOM return false; } if (!loopBlocks.IsMember(top->bbNum)) { // The "back-edge" we identified isn't actually part of the flow cycle containing ENTRY return false; } // Disqualify loops where the first block of the loop is less nested in EH than // the bottom block. That is, we don't want to handle loops where the back edge // goes from within an EH region to a first block that is outside that same EH // region. Note that we *do* handle loops where the first block is the *first* // block of a more nested EH region (since it is legal to branch to the first // block of an immediately more nested EH region). So, for example, disqualify // this: // // BB02 // ... // try { // ... // BB10 BBJ_COND => BB02 // ... // } // // Here, BB10 is more nested than BB02. 
if (bottom->hasTryIndex() && !comp->bbInTryRegions(bottom->getTryIndex(), top)) { JITDUMP("Loop 'top' " FMT_BB " is in an outer EH region compared to loop 'bottom' " FMT_BB ". Rejecting " "loop.\n", top->bbNum, bottom->bbNum); return false; } #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Disqualify loops where the first block of the loop is a finally target. // The main problem is when multiple loops share a 'top' block that is a finally // target and we canonicalize the loops by adding a new loop head. In that case, we // need to update the blocks so the finally target bit is moved to the newly created // block, and removed from the old 'top' block. This is 'hard', so it's easier to disallow // the loop than to update the flow graph to support this case. if ((top->bbFlags & BBF_FINALLY_TARGET) != 0) { JITDUMP("Loop 'top' " FMT_BB " is a finally target. Rejecting loop.\n", top->bbNum); return false; } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Compact the loop (sweep through it and move out any blocks that aren't part of the // flow cycle), and find the exits. if (!MakeCompactAndFindExits()) { // Unable to preserve well-formed loop during compaction. return false; } // We have a valid loop. return true; } //------------------------------------------------------------------------ // GetExitCount: Return the exit count computed for the loop // unsigned char GetExitCount() const { return exitCount; } private: //------------------------------------------------------------------------ // FindEntry: See if given HEAD flows to valid ENTRY between given TOP and BOTTOM // // Arguments: // head - Block to be the HEAD of any loop identified // top - Block to be the TOP of any loop identified // bottom - Block to be the BOTTOM of any loop identified // // Return Value: // Block to be the ENTRY of any loop identified, or nullptr if no // such entry meeting our criteria can be found. // // Notes: // Returns main entry if one is found, does not check for side-entries. // BasicBlock* FindEntry(BasicBlock* head, BasicBlock* top, BasicBlock* bottom) { if (head->bbJumpKind == BBJ_ALWAYS) { if (head->bbJumpDest->bbNum <= bottom->bbNum && head->bbJumpDest->bbNum >= top->bbNum) { // OK - we enter somewhere within the loop. // Cannot enter at the top - should have being caught by redundant jumps assert((head->bbJumpDest != top) || (head->bbFlags & BBF_KEEP_BBJ_ALWAYS)); return head->bbJumpDest; } else { // special case - don't consider now // assert (!"Loop entered in weird way!"); return nullptr; } } // Can we fall through into the loop? else if (head->KindIs(BBJ_NONE, BBJ_COND)) { // The ENTRY is at the TOP (a do-while loop) return top; } else { return nullptr; // HEAD does not flow into the loop; bail for now } } //------------------------------------------------------------------------ // HasSingleEntryCycle: Perform a reverse flow walk from ENTRY, visiting // only blocks between TOP and BOTTOM, to determine if such a cycle // exists and if it has a single entry. // // Return Value: // true - Found a single-entry cycle. // false - Did not find a single-entry cycle. // // Notes: // Will mark (in `loopBlocks`) all blocks found to participate in the cycle. // bool HasSingleEntryCycle() { // Now do a backwards flow walk from entry to see if we have a single-entry loop bool foundCycle = false; // Seed the loop block set and worklist with the entry block. 
loopBlocks.Reset(entry->bbNum); jitstd::list<BasicBlock*> worklist(comp->getAllocator(CMK_LoopOpt)); worklist.push_back(entry); while (!worklist.empty()) { BasicBlock* block = worklist.back(); worklist.pop_back(); // Make sure ENTRY dominates all blocks in the loop. if (block->bbNum > oldBlockMaxNum) { // This is a new block we added to connect fall-through, so the // recorded dominator information doesn't cover it. Just continue, // and when we process its unique predecessor we'll abort if ENTRY // doesn't dominate that. } else if (!comp->fgDominate(entry, block)) { return false; } // Add preds to the worklist, checking for side-entries. for (BasicBlock* const predBlock : block->PredBlocks()) { unsigned int testNum = PositionNum(predBlock); if ((testNum < top->bbNum) || (testNum > bottom->bbNum)) { // Pred is out of loop range if (block == entry) { if (predBlock == head) { // This is the single entry we expect. continue; } // ENTRY has some pred other than head outside the loop. If ENTRY does not // dominate this pred, we'll consider this a side-entry and skip this loop; // otherwise the loop is still valid and this may be a (flow-wise) back-edge // of an outer loop. For the dominance test, if `predBlock` is a new block, use // its unique predecessor since the dominator tree has info for that. BasicBlock* effectivePred = (predBlock->bbNum > oldBlockMaxNum ? predBlock->bbPrev : predBlock); if (comp->fgDominate(entry, effectivePred)) { // Outer loop back-edge continue; } } // There are multiple entries to this loop, don't consider it. return false; } bool isFirstVisit; if (predBlock == entry) { // We have indeed found a cycle in the flow graph. isFirstVisit = !foundCycle; foundCycle = true; assert(loopBlocks.IsMember(predBlock->bbNum)); } else if (loopBlocks.TestAndInsert(predBlock->bbNum)) { // Already visited this pred isFirstVisit = false; } else { // Add this predBlock to the worklist worklist.push_back(predBlock); isFirstVisit = true; } if (isFirstVisit && (predBlock->bbNext != nullptr) && (PositionNum(predBlock->bbNext) == predBlock->bbNum)) { // We've created a new block immediately after `predBlock` to // reconnect what was fall-through. Mark it as in-loop also; // it needs to stay with `prev` and if it exits the loop we'd // just need to re-create it if we tried to move it out. loopBlocks.Insert(predBlock->bbNext->bbNum); } } } return foundCycle; } //------------------------------------------------------------------------ // PositionNum: Get the number identifying a block's position per the // lexical ordering that existed before searching for (and compacting) // loops. // // Arguments: // block - Block whose position is desired. // // Return Value: // A number indicating that block's position relative to others. // // Notes: // When the given block is a new one created during loop compaction, // the number of its unique predecessor is returned. // unsigned int PositionNum(BasicBlock* block) { if (block->bbNum > oldBlockMaxNum) { // This must be a block we inserted to connect fall-through after moving blocks. // To determine if it's in the loop or not, use the number of its unique predecessor // block. 
assert(block->bbPreds->getBlock() == block->bbPrev); assert(block->bbPreds->flNext == nullptr); return block->bbPrev->bbNum; } return block->bbNum; } //------------------------------------------------------------------------ // MakeCompactAndFindExits: Compact the loop (sweep through it and move out // any blocks that aren't part of the flow cycle), and find the exits (set // lastExit and exitCount). // // Return Value: // true - Loop successfully compacted (or `loopBlocks` expanded to // include all blocks in the lexical range), exits enumerated. // false - Loop cannot be made compact and remain well-formed. // bool MakeCompactAndFindExits() { // Compaction (if it needs to happen) will require an insertion point. BasicBlock* moveAfter = nullptr; for (BasicBlock* previous = top->bbPrev; previous != bottom;) { BasicBlock* block = previous->bbNext; if (loopBlocks.IsMember(block->bbNum)) { // This block is a member of the loop. Check to see if it may exit the loop. CheckForExit(block); // Done processing this block; move on to the next. previous = block; continue; } // This blocks is lexically between TOP and BOTTOM, but it does not // participate in the flow cycle. Check for a run of consecutive // such blocks. BasicBlock* lastNonLoopBlock = block; BasicBlock* nextLoopBlock = block->bbNext; while (!loopBlocks.IsMember(nextLoopBlock->bbNum)) { lastNonLoopBlock = nextLoopBlock; nextLoopBlock = nextLoopBlock->bbNext; // This loop must terminate because we know BOTTOM is in loopBlocks. } // Choose an insertion point for non-loop blocks if we haven't yet done so. if (moveAfter == nullptr) { moveAfter = FindInsertionPoint(); } if (!BasicBlock::sameEHRegion(previous, nextLoopBlock) || !BasicBlock::sameEHRegion(previous, moveAfter)) { // EH regions would be ill-formed if we moved these blocks out. // See if we can consider them loop blocks without introducing // a side-entry. if (CanTreatAsLoopBlocks(block, lastNonLoopBlock)) { // The call to `canTreatAsLoop` marked these blocks as part of the loop; // iterate without updating `previous` so that we'll analyze them as part // of the loop. continue; } else { // We can't move these out of the loop or leave them in, so just give // up on this loop. return false; } } // Now physically move the blocks. BasicBlock* moveBefore = moveAfter->bbNext; comp->fgUnlinkRange(block, lastNonLoopBlock); comp->fgMoveBlocksAfter(block, lastNonLoopBlock, moveAfter); comp->ehUpdateLastBlocks(moveAfter, lastNonLoopBlock); // Apply any adjustments needed for fallthrough at the boundaries of the moved region. FixupFallThrough(moveAfter, moveBefore, block); FixupFallThrough(lastNonLoopBlock, nextLoopBlock, moveBefore); // Also apply any adjustments needed where the blocks were snipped out of the loop. BasicBlock* newBlock = FixupFallThrough(previous, block, nextLoopBlock); if (newBlock != nullptr) { // This new block is in the loop and is a loop exit. loopBlocks.Insert(newBlock->bbNum); lastExit = newBlock; ++exitCount; } // Update moveAfter for the next insertion. moveAfter = lastNonLoopBlock; // Note that we've changed the flow graph, and continue without updating // `previous` so that we'll process nextLoopBlock. 
changedFlowGraph = true; } if ((exitCount == 1) && (lastExit == nullptr)) { // If we happen to have a loop with two exits, one of which goes to an // infinite loop that's lexically nested inside it, where the inner loop // can't be moved out, we can end up in this situation (because // CanTreatAsLoopBlocks will have decremented the count expecting to find // another exit later). Bump the exit count to 2, since downstream code // will not be prepared for null lastExit with exitCount of 1. assert(forgotExit); exitCount = 2; } // Loop compaction was successful return true; } //------------------------------------------------------------------------ // FindInsertionPoint: Find an appropriate spot to which blocks that are // lexically between TOP and BOTTOM but not part of the flow cycle // can be moved. // // Return Value: // Block after which to insert moved blocks. // BasicBlock* FindInsertionPoint() { // Find an insertion point for blocks we're going to move. Move them down // out of the loop, and if possible find a spot that won't break up fall-through. BasicBlock* moveAfter = bottom; while (moveAfter->bbFallsThrough()) { // Keep looking for a better insertion point if we can. BasicBlock* newMoveAfter = TryAdvanceInsertionPoint(moveAfter); if (newMoveAfter == nullptr) { // Ran out of candidate insertion points, so just split up the fall-through. return moveAfter; } moveAfter = newMoveAfter; } return moveAfter; } //------------------------------------------------------------------------ // TryAdvanceInsertionPoint: Find the next legal insertion point after // the given one, if one exists. // // Arguments: // oldMoveAfter - Prior insertion point; find the next after this. // // Return Value: // The next block after `oldMoveAfter` that is a legal insertion point // (i.e. blocks being swept out of the loop can be moved immediately // after it), if one exists, else nullptr. // BasicBlock* TryAdvanceInsertionPoint(BasicBlock* oldMoveAfter) { BasicBlock* newMoveAfter = oldMoveAfter->bbNext; if (!BasicBlock::sameEHRegion(oldMoveAfter, newMoveAfter)) { // Don't cross an EH region boundary. return nullptr; } if (newMoveAfter->KindIs(BBJ_ALWAYS, BBJ_COND)) { unsigned int destNum = newMoveAfter->bbJumpDest->bbNum; if ((destNum >= top->bbNum) && (destNum <= bottom->bbNum) && !loopBlocks.IsMember(destNum)) { // Reversing this branch out of block `newMoveAfter` could confuse this algorithm // (in particular, the edge would still be numerically backwards but no longer be // lexically backwards, so a lexical forward walk from TOP would not find BOTTOM), // so don't do that. // We're checking for BBJ_ALWAYS and BBJ_COND only here -- we don't need to // check for BBJ_SWITCH because we'd never consider it a loop back-edge. return nullptr; } } // Similarly check to see if advancing to `newMoveAfter` would reverse the lexical order // of an edge from the run of blocks being moved to `newMoveAfter` -- doing so would // introduce a new lexical back-edge, which could (maybe?) confuse the loop search // algorithm, and isn't desirable layout anyway. for (BasicBlock* const predBlock : newMoveAfter->PredBlocks()) { unsigned int predNum = predBlock->bbNum; if ((predNum >= top->bbNum) && (predNum <= bottom->bbNum) && !loopBlocks.IsMember(predNum)) { // Don't make this forward edge a backwards edge. 
                return nullptr;
            }
        }

        if (IsRecordedBottom(newMoveAfter))
        {
            // This is the BOTTOM of another loop; don't move any blocks past it, to avoid moving them
            // out of that loop (we should have already done so when processing that loop if it were legal).
            return nullptr;
        }

        // Advancing the insertion point is ok, except that we can't split up any CallFinally/BBJ_ALWAYS
        // pair, so if we've got such a pair recurse to see if we can move past the whole thing.
        return (newMoveAfter->isBBCallAlwaysPair() ? TryAdvanceInsertionPoint(newMoveAfter) : newMoveAfter);
    }

    //------------------------------------------------------------------------
    // IsRecordedBottom: Determine if the given block is the BOTTOM of a previously
    // recorded loop.
    //
    // Arguments:
    //    block - Block to check for BOTTOM-ness.
    //
    // Return Value:
    //    true  - The block was recorded as `bottom` of some earlier-processed loop.
    //    false - No loops yet recorded have this block as their `bottom`.
    //
    bool IsRecordedBottom(BasicBlock* block)
    {
        if (block->bbNum > oldBlockMaxNum)
        {
            // This is a new block, which can't be an outer bottom block because we only allow old blocks
            // as BOTTOM.
            return false;
        }
        return BlockSetOps::IsMember(comp, bottomBlocks, block->bbNum);
    }

    //------------------------------------------------------------------------
    // CanTreatAsLoopBlocks: If the given range of blocks can be treated as
    // loop blocks, add them to `loopBlocks` and return true.  Otherwise,
    // return false.
    //
    // Arguments:
    //    firstNonLoopBlock - First block in the run to be subsumed.
    //    lastNonLoopBlock  - Last block in the run to be subsumed.
    //
    // Return Value:
    //    true  - The blocks from `firstNonLoopBlock` to `lastNonLoopBlock` were
    //            successfully added to `loopBlocks`.
    //    false - Treating the blocks from `firstNonLoopBlock` to `lastNonLoopBlock`
    //            would not be legal (it would induce a side-entry).
    //
    // Notes:
    //    `loopBlocks` may be modified even if `false` is returned.
    //    `exitCount` and `lastExit` may be modified if this process identifies
    //    in-loop edges that were previously counted as exits.
    //
    bool CanTreatAsLoopBlocks(BasicBlock* firstNonLoopBlock, BasicBlock* lastNonLoopBlock)
    {
        for (BasicBlock* const testBlock : comp->Blocks(firstNonLoopBlock, lastNonLoopBlock))
        {
            for (BasicBlock* const testPred : testBlock->PredBlocks())
            {
                unsigned int predPosNum         = PositionNum(testPred);
                unsigned int firstNonLoopPosNum = PositionNum(firstNonLoopBlock);
                unsigned int lastNonLoopPosNum  = PositionNum(lastNonLoopBlock);

                if (loopBlocks.IsMember(predPosNum) ||
                    ((predPosNum >= firstNonLoopPosNum) && (predPosNum <= lastNonLoopPosNum)))
                {
                    // This pred is in the loop (or what will be the loop if we determine this
                    // run of exit blocks doesn't include a side-entry).

                    if (predPosNum < firstNonLoopPosNum)
                    {
                        // We've already counted this block as an exit, so decrement the count.
                        --exitCount;
                        if (lastExit == testPred)
                        {
                            // Erase this now-bogus `lastExit` entry.
                            lastExit = nullptr;
                            INDEBUG(forgotExit = true);
                        }
                    }
                }
                else
                {
                    // This pred is not in the loop, so this constitutes a side-entry.
                    return false;
                }
            }

            // Either we're going to abort the loop on a subsequent testBlock, or this
            // testBlock is part of the loop.
            loopBlocks.Insert(testBlock->bbNum);
        }

        // All blocks were ok to leave in the loop.
        return true;
    }

    //------------------------------------------------------------------------
    // FixupFallThrough: Re-establish any broken control flow connectivity
    // and eliminate any "goto-next"s that were created by changing the
    // given block's lexical follower.
// // Arguments: // block - Block whose `bbNext` has changed. // oldNext - Previous value of `block->bbNext`. // newNext - New value of `block->bbNext`. // // Return Value: // If a new block is created to reconnect flow, the new block is // returned; otherwise, nullptr. // BasicBlock* FixupFallThrough(BasicBlock* block, BasicBlock* oldNext, BasicBlock* newNext) { // If we create a new block, that will be our return value. BasicBlock* newBlock = nullptr; if (block->bbFallsThrough()) { // Need to reconnect the flow from `block` to `oldNext`. if ((block->bbJumpKind == BBJ_COND) && (block->bbJumpDest == newNext)) { // Reverse the jump condition GenTree* test = block->lastNode(); noway_assert(test->OperIsConditionalJump()); if (test->OperGet() == GT_JTRUE) { GenTree* cond = comp->gtReverseCond(test->AsOp()->gtOp1); assert(cond == test->AsOp()->gtOp1); // Ensure `gtReverseCond` did not create a new node. test->AsOp()->gtOp1 = cond; } else { comp->gtReverseCond(test); } // Redirect the Conditional JUMP to go to `oldNext` block->bbJumpDest = oldNext; } else { // Insert an unconditional jump to `oldNext` just after `block`. newBlock = comp->fgConnectFallThrough(block, oldNext); noway_assert((newBlock == nullptr) || loopBlocks.CanRepresent(newBlock->bbNum)); } } else if ((block->bbJumpKind == BBJ_ALWAYS) && (block->bbJumpDest == newNext)) { // We've made `block`'s jump target its bbNext, so remove the jump. if (!comp->fgOptimizeBranchToNext(block, newNext, block->bbPrev)) { // If optimizing away the goto-next failed for some reason, mark it KEEP_BBJ_ALWAYS to // prevent assertions from complaining about it. block->bbFlags |= BBF_KEEP_BBJ_ALWAYS; } } // Make sure we don't leave around a goto-next unless it's marked KEEP_BBJ_ALWAYS. assert(!block->KindIs(BBJ_COND, BBJ_ALWAYS) || (block->bbJumpDest != newNext) || ((block->bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0)); return newBlock; } //------------------------------------------------------------------------ // CheckForExit: Check if the given block has any successor edges that are // loop exits, and update `lastExit` and `exitCount` if so. // // Arguments: // block - Block whose successor edges are to be checked. // // Notes: // If one block has multiple exiting successor edges, those are counted // as multiple exits in `exitCount`. // void CheckForExit(BasicBlock* block) { BasicBlock* exitPoint; switch (block->bbJumpKind) { case BBJ_COND: case BBJ_CALLFINALLY: case BBJ_ALWAYS: case BBJ_EHCATCHRET: assert(block->bbJumpDest); exitPoint = block->bbJumpDest; if (!loopBlocks.IsMember(exitPoint->bbNum)) { // Exit from a block other than BOTTOM lastExit = block; exitCount++; } break; case BBJ_NONE: break; case BBJ_EHFINALLYRET: case BBJ_EHFILTERRET: // The "try" associated with this "finally" must be in the same loop, so the // finally block will return control inside the loop. break; case BBJ_THROW: case BBJ_RETURN: // Those are exits from the loop lastExit = block; exitCount++; break; case BBJ_SWITCH: for (BasicBlock* const exitPoint : block->SwitchTargets()) { if (!loopBlocks.IsMember(exitPoint->bbNum)) { lastExit = block; exitCount++; } } break; default: noway_assert(!"Unexpected bbJumpKind"); break; } if (block->bbFallsThrough() && !loopBlocks.IsMember(block->bbNext->bbNum)) { // Found a fall-through exit. lastExit = block; exitCount++; } } }; } // end (anonymous) namespace //------------------------------------------------------------------------ // optFindNaturalLoops: Find the natural loops, using dominators. 
Note that the test for // a loop is slightly different from the standard one, because we have not done a depth // first reordering of the basic blocks. // // See LoopSearch class comment header for a description of the loops found. // // We will find and record a maximum of BasicBlock::MAX_LOOP_NUM loops (currently 64). // void Compiler::optFindNaturalLoops() { #ifdef DEBUG if (verbose) { printf("*************** In optFindNaturalLoops()\n"); } #endif // DEBUG noway_assert(fgDomsComputed); assert(fgHasLoops); #if COUNT_LOOPS hasMethodLoops = false; loopsThisMethod = 0; loopOverflowThisMethod = false; #endif LoopSearch search(this); for (BasicBlock* head = fgFirstBB; head->bbNext != nullptr; head = head->bbNext) { BasicBlock* top = head->bbNext; // Blocks that are rarely run have a zero bbWeight and should never be optimized here. if (top->bbWeight == BB_ZERO_WEIGHT) { continue; } for (BasicBlock* const predBlock : top->PredBlocks()) { if (search.FindLoop(head, top, predBlock)) { // Found a loop; record it and see if we've hit the limit. bool recordedLoop = search.RecordLoop(); (void)recordedLoop; // avoid unusued variable warnings in COUNT_LOOPS and !DEBUG #if COUNT_LOOPS if (!hasMethodLoops) { // Mark the method as containing natural loops totalLoopMethods++; hasMethodLoops = true; } // Increment total number of loops found totalLoopCount++; loopsThisMethod++; // Keep track of the number of exits loopExitCountTable.record(static_cast<unsigned>(search.GetExitCount())); // Note that we continue to look for loops even if // (optLoopCount == BasicBlock::MAX_LOOP_NUM), in contrast to the !COUNT_LOOPS code below. // This gives us a better count and stats. Hopefully it doesn't affect actual codegen. CLANG_FORMAT_COMMENT_ANCHOR; #else // COUNT_LOOPS assert(recordedLoop); if (optLoopCount == BasicBlock::MAX_LOOP_NUM) { // We won't be able to record any more loops, so stop looking. goto NO_MORE_LOOPS; } #endif // COUNT_LOOPS // Continue searching preds of `top` to see if any other are // back-edges (this can happen for nested loops). The iteration // is safe because the compaction we do only modifies predecessor // lists of blocks that gain or lose fall-through from their // `bbPrev`, but since the motion is from within the loop to below // it, we know we're not altering the relationship between `top` // and its `bbPrev`. } } } #if !COUNT_LOOPS NO_MORE_LOOPS: #endif // !COUNT_LOOPS #if COUNT_LOOPS loopCountTable.record(loopsThisMethod); if (maxLoopsPerMethod < loopsThisMethod) { maxLoopsPerMethod = loopsThisMethod; } if (loopOverflowThisMethod) { totalLoopOverflows++; } #endif // COUNT_LOOPS bool mod = search.ChangedFlowGraph(); if (mod) { // Need to renumber blocks now since loop canonicalization // depends on it; can defer the rest of fgUpdateChangedFlowGraph() // until after canonicalizing loops. Dominator information is // recorded in terms of block numbers, so flag it invalid. fgDomsComputed = false; fgRenumberBlocks(); } // Now the loop indices are stable. We can figure out parent/child relationships // (using table indices to name loops), and label blocks. 
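    // Because parents precede children in the loop table, scanning downward from just
    // below `loopInd` finds the innermost loop that contains it first; that loop is
    // therefore recorded as the immediate parent, and any outer ancestors sit at
    // still-smaller indices.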
for (unsigned char loopInd = 1; loopInd < optLoopCount; loopInd++) { for (unsigned char possibleParent = loopInd; possibleParent > 0;) { possibleParent--; if (optLoopTable[possibleParent].lpContains(optLoopTable[loopInd])) { optLoopTable[loopInd].lpParent = possibleParent; optLoopTable[loopInd].lpSibling = optLoopTable[possibleParent].lpChild; optLoopTable[possibleParent].lpChild = loopInd; break; } } } // Now label the blocks with the innermost loop to which they belong. Since parents // precede children in the table, doing the labeling for each loop in order will achieve // this -- the innermost loop labeling will be done last. (Inner loop blocks will be // labeled multiple times before being correct at the end.) for (unsigned char loopInd = 0; loopInd < optLoopCount; loopInd++) { for (BasicBlock* const blk : optLoopTable[loopInd].LoopBlocks()) { blk->bbNatLoopNum = loopInd; } } // Make sure that loops are canonical: that every loop has a unique "top", by creating an empty "nop" // one, if necessary, for loops containing others that share a "top." for (unsigned char loopInd = 0; loopInd < optLoopCount; loopInd++) { // Traverse the outermost loops as entries into the loop nest; so skip non-outermost. if (optLoopTable[loopInd].lpParent != BasicBlock::NOT_IN_LOOP) { continue; } // Otherwise... if (optCanonicalizeLoopNest(loopInd)) { mod = true; } } if (mod) { constexpr bool computePreds = true; fgUpdateChangedFlowGraph(computePreds); } if (false /* pre-header stress */) { // Stress mode: aggressively create loop pre-header for every loop. for (unsigned loopInd = 0; loopInd < optLoopCount; loopInd++) { fgCreateLoopPreHeader(loopInd); } if (fgModified) { // The predecessors were maintained in fgCreateLoopPreHeader; don't rebuild them. constexpr bool computePreds = false; constexpr bool computeDoms = true; fgUpdateChangedFlowGraph(computePreds, computeDoms); } } #ifdef DEBUG if (verbose && (optLoopCount > 0)) { optPrintLoopTable(); } #endif // DEBUG } //------------------------------------------------------------------------ // optIdentifyLoopsForAlignment: Determine which loops should be considered for alignment. // // All innermost loops whose block weight meets a threshold are candidates for alignment. // The `first` block of the loop is marked with the BBF_LOOP_ALIGN flag to indicate this // (the loop table itself is not changed). // // Depends on the loop table, and on block weights being set. // void Compiler::optIdentifyLoopsForAlignment() { #if FEATURE_LOOP_ALIGN if (codeGen->ShouldAlignLoops()) { for (BasicBlock::loopNumber loopInd = 0; loopInd < optLoopCount; loopInd++) { // An innerloop candidate that might need alignment if (optLoopTable[loopInd].lpChild == BasicBlock::NOT_IN_LOOP) { BasicBlock* top = optLoopTable[loopInd].lpTop; weight_t topWeight = top->getBBWeight(this); if (topWeight >= (opts.compJitAlignLoopMinBlockWeight * BB_UNITY_WEIGHT)) { // Sometimes with JitOptRepeat > 1, we might end up finding the loops twice. In such // cases, make sure to count them just once. 
if (!top->isLoopAlign()) { loopAlignCandidates++; top->bbFlags |= BBF_LOOP_ALIGN; JITDUMP(FMT_LP " that starts at " FMT_BB " needs alignment, weight=" FMT_WT ".\n", loopInd, top->bbNum, top->getBBWeight(this)); } } else { JITDUMP("Skip alignment for " FMT_LP " that starts at " FMT_BB " weight=" FMT_WT ".\n", loopInd, top->bbNum, topWeight); } } } } #endif } //------------------------------------------------------------------------ // optRedirectBlock: Replace the branch successors of a block based on a block map. // // Updates the successors of `blk`: if `blk2` is a branch successor of `blk`, and there is a mapping // for `blk2->blk3` in `redirectMap`, change `blk` so that `blk3` is this branch successor. // // Note that fall-through successors are not modified, including predecessor lists. // // Arguments: // blk - block to redirect // redirectMap - block->block map specifying how the `blk` target will be redirected. // updatePreds - if `true`, update the predecessor lists to match. // void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, const bool updatePreds) { BasicBlock* newJumpDest = nullptr; switch (blk->bbJumpKind) { case BBJ_NONE: case BBJ_THROW: case BBJ_RETURN: case BBJ_EHFILTERRET: case BBJ_EHFINALLYRET: case BBJ_EHCATCHRET: // These have no jump destination to update. break; case BBJ_ALWAYS: case BBJ_LEAVE: case BBJ_CALLFINALLY: case BBJ_COND: // All of these have a single jump destination to update. if (redirectMap->Lookup(blk->bbJumpDest, &newJumpDest)) { if (updatePreds) { fgRemoveRefPred(blk->bbJumpDest, blk); fgAddRefPred(newJumpDest, blk); } blk->bbJumpDest = newJumpDest; } break; case BBJ_SWITCH: { bool redirected = false; for (unsigned i = 0; i < blk->bbJumpSwt->bbsCount; i++) { BasicBlock* switchDest = blk->bbJumpSwt->bbsDstTab[i]; if (redirectMap->Lookup(switchDest, &newJumpDest)) { if (updatePreds) { fgRemoveRefPred(switchDest, blk); fgAddRefPred(newJumpDest, blk); } blk->bbJumpSwt->bbsDstTab[i] = newJumpDest; redirected = true; } } // If any redirections happened, invalidate the switch table map for the switch. if (redirected) { // Don't create a new map just to try to remove an entry. BlockToSwitchDescMap* switchMap = GetSwitchDescMap(/* createIfNull */ false); if (switchMap != nullptr) { switchMap->Remove(blk); } } } break; default: unreached(); } } // TODO-Cleanup: This should be a static member of the BasicBlock class. void Compiler::optCopyBlkDest(BasicBlock* from, BasicBlock* to) { assert(from->bbJumpKind == to->bbJumpKind); // Precondition. // copy the jump destination(s) from "from" to "to". switch (to->bbJumpKind) { case BBJ_ALWAYS: case BBJ_LEAVE: case BBJ_CALLFINALLY: case BBJ_COND: // All of these have a single jump destination to update. to->bbJumpDest = from->bbJumpDest; break; case BBJ_SWITCH: to->bbJumpSwt = new (this, CMK_BasicBlock) BBswtDesc(this, from->bbJumpSwt); break; default: break; } } // Returns true if 'block' is an entry block for any loop in 'optLoopTable' bool Compiler::optIsLoopEntry(BasicBlock* block) const { for (unsigned char loopInd = 0; loopInd < optLoopCount; loopInd++) { if ((optLoopTable[loopInd].lpFlags & LPFLG_REMOVED) != 0) { continue; } if (optLoopTable[loopInd].lpEntry == block) { return true; } } return false; } // Canonicalize the loop nest rooted at parent loop 'loopInd'. // Returns 'true' if the flow graph is modified. bool Compiler::optCanonicalizeLoopNest(unsigned char loopInd) { bool modified = false; // Is the top of the current loop in any nested loop? 
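    // (bbNatLoopNum identifies the *innermost* loop containing each block, so a mismatch
    // below means some nested loop shares this loop's top block, and optCanonicalizeLoop
    // must give this loop a unique top of its own.)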
if (optLoopTable[loopInd].lpTop->bbNatLoopNum != loopInd) { if (optCanonicalizeLoop(loopInd)) { modified = true; } } for (unsigned char child = optLoopTable[loopInd].lpChild; // child != BasicBlock::NOT_IN_LOOP; // child = optLoopTable[child].lpSibling) { if (optCanonicalizeLoopNest(child)) { modified = true; } } return modified; } bool Compiler::optCanonicalizeLoop(unsigned char loopInd) { // Is the top uniquely part of the current loop? BasicBlock* t = optLoopTable[loopInd].lpTop; if (t->bbNatLoopNum == loopInd) { return false; } JITDUMP("in optCanonicalizeLoop: " FMT_LP " has top " FMT_BB " (bottom " FMT_BB ") with natural loop number " FMT_LP ": need to canonicalize\n", loopInd, t->bbNum, optLoopTable[loopInd].lpBottom->bbNum, t->bbNatLoopNum); // Otherwise, the top of this loop is also part of a nested loop. // // Insert a new unique top for this loop. We must be careful to put this new // block in the correct EH region. Note that t->bbPrev might be in a different // EH region. For example: // // try { // ... // BB07 // } // BB08 // "first" // // In this case, first->bbPrev is BB07, which is in a different 'try' region. // On the other hand, the first block of multiple loops might be the first // block of a 'try' region that is completely contained in the multiple loops. // for example: // // BB08 try { } // ... // BB10 BBJ_ALWAYS => BB08 // ... // BB12 BBJ_ALWAYS => BB08 // // Here, we have two loops, both with BB08 as the "first" block. Block BB08 // is a single-block "try" region. Neither loop "bottom" block is in the same // "try" region as BB08. This is legal because you can jump to the first block // of a try region. With EH normalization, no two "try" regions will share // this block. In this case, we need to insert a new block for the outer loop // in the same EH region as the branch from the "bottom": // // BB30 BBJ_NONE // BB08 try { } // ... // BB10 BBJ_ALWAYS => BB08 // ... // BB12 BBJ_ALWAYS => BB30 // // Another possibility is that the "first" block of the loop nest can be the first block // of a "try" region that also has other predecessors than those in the loop, or even in // the "try" region (since blocks can target the first block of a "try" region). For example: // // BB08 try { // ... // BB10 BBJ_ALWAYS => BB08 // ... // BB12 BBJ_ALWAYS => BB08 // BB13 } // ... // BB20 BBJ_ALWAYS => BB08 // ... // BB25 BBJ_ALWAYS => BB08 // // Here, BB08 has 4 flow graph predecessors: BB10, BB12, BB20, BB25. These are all potential loop // bottoms, for four possible nested loops. However, we require all the loop bottoms to be in the // same EH region. For loops BB08..BB10 and BB08..BB12, we need to add a new "top" block within // the try region, immediately before BB08. The bottom of the loop BB08..BB10 loop will target the // old BB08, and the bottom of the BB08..BB12 loop will target the new loop header. The other branches // (BB20, BB25) must target the new loop header, both for correctness, and to avoid the illegal // situation of branching to a non-first block of a 'try' region. // // We can also have a loop nest where the "first" block is outside of a "try" region // and the back edges are inside a "try" region, for example: // // BB02 // "first" // ... // BB09 try { BBJ_COND => BB02 // ... // BB15 BBJ_COND => BB02 // ... // BB21 } // end of "try" // // In this case, both loop back edges were formed by "leave" instructions that were // imported into branches that were later made conditional. 
In this case, we don't // want to copy the EH region of the back edge, since that would create a block // outside of and disjoint with the "try" region of the back edge. However, to // simplify things, we disqualify this type of loop, so we should never see this here. BasicBlock* h = optLoopTable[loopInd].lpHead; BasicBlock* b = optLoopTable[loopInd].lpBottom; // The loop must be entirely contained within a single handler region. assert(BasicBlock::sameHndRegion(t, b)); // If the bottom block is in the same "try" region, then we extend the EH // region. Otherwise, we add the new block outside the "try" region. const bool extendRegion = BasicBlock::sameTryRegion(t, b); BasicBlock* newT = fgNewBBbefore(BBJ_NONE, t, extendRegion); if (!extendRegion) { // We need to set the EH region manually. Set it to be the same // as the bottom block. newT->copyEHRegion(b); } // The new block can reach the same set of blocks as the old one, but don't try to reflect // that in its reachability set here -- creating the new block may have changed the BlockSet // representation from short to long, and canonicalizing loops is immediately followed by // a call to fgUpdateChangedFlowGraph which will recompute the reachability sets anyway. // Redirect the "bottom" of the current loop to "newT". BlockToBlockMap* blockMap = new (getAllocator(CMK_LoopOpt)) BlockToBlockMap(getAllocator(CMK_LoopOpt)); blockMap->Set(t, newT); optRedirectBlock(b, blockMap); // Redirect non-loop preds of "t" to also go to "newT". Inner loops that also branch to "t" should continue // to do so. However, there maybe be other predecessors from outside the loop nest that need to be updated // to point to "newT". This normally wouldn't happen, since they too would be part of the loop nest. However, // they might have been prevented from participating in the loop nest due to different EH nesting, or some // other reason. // // Note that optRedirectBlock doesn't update the predecessors list. So, if the same 't' block is processed // multiple times while canonicalizing multiple loop nests, we'll attempt to redirect a predecessor multiple times. // This is ok, because after the first redirection, the topPredBlock branch target will no longer match the source // edge of the blockMap, so nothing will happen. bool firstPred = true; for (BasicBlock* const topPredBlock : t->PredBlocks()) { // Skip if topPredBlock is in the loop. // Note that this uses block number to detect membership in the loop. We are adding blocks during // canonicalization, and those block numbers will be new, and larger than previous blocks. However, we work // outside-in, so we shouldn't encounter the new blocks at the loop boundaries, or in the predecessor lists. if (t->bbNum <= topPredBlock->bbNum && topPredBlock->bbNum <= b->bbNum) { JITDUMP("in optCanonicalizeLoop: 'top' predecessor " FMT_BB " is in the range of " FMT_LP " (" FMT_BB ".." 
FMT_BB "); not redirecting its bottom edge\n", topPredBlock->bbNum, loopInd, t->bbNum, b->bbNum); continue; } JITDUMP("in optCanonicalizeLoop: redirect top predecessor " FMT_BB " to " FMT_BB "\n", topPredBlock->bbNum, newT->bbNum); optRedirectBlock(topPredBlock, blockMap); // When we have profile data then the 'newT' block will inherit topPredBlock profile weight if (topPredBlock->hasProfileWeight()) { // This corrects an issue when the topPredBlock has a profile based weight // if (firstPred) { JITDUMP("in optCanonicalizeLoop: block " FMT_BB " will inheritWeight from " FMT_BB "\n", newT->bbNum, topPredBlock->bbNum); newT->inheritWeight(topPredBlock); firstPred = false; } else { JITDUMP("in optCanonicalizeLoop: block " FMT_BB " will also contribute to the weight of " FMT_BB "\n", newT->bbNum, topPredBlock->bbNum); weight_t newWeight = newT->getBBWeight(this) + topPredBlock->getBBWeight(this); newT->setBBProfileWeight(newWeight); } } } assert(newT->bbNext == t); // If it had been a do-while loop (top == entry), update entry, as well. BasicBlock* origE = optLoopTable[loopInd].lpEntry; if (optLoopTable[loopInd].lpTop == origE) { optLoopTable[loopInd].lpEntry = newT; } optLoopTable[loopInd].lpTop = newT; newT->bbNatLoopNum = loopInd; JITDUMP("in optCanonicalizeLoop: made new block " FMT_BB " [%p] the new unique top of loop %d.\n", newT->bbNum, dspPtr(newT), loopInd); // Make sure the head block still goes to the entry... if (h->bbJumpKind == BBJ_NONE && h->bbNext != optLoopTable[loopInd].lpEntry) { h->bbJumpKind = BBJ_ALWAYS; h->bbJumpDest = optLoopTable[loopInd].lpEntry; } else if (h->bbJumpKind == BBJ_COND && h->bbNext == newT && newT != optLoopTable[loopInd].lpEntry) { BasicBlock* h2 = fgNewBBafter(BBJ_ALWAYS, h, /*extendRegion*/ true); optLoopTable[loopInd].lpHead = h2; h2->bbJumpDest = optLoopTable[loopInd].lpEntry; h2->bbStmtList = nullptr; fgInsertStmtAtEnd(h2, fgNewStmtFromTree(gtNewOperNode(GT_NOP, TYP_VOID, nullptr))); } // If any loops nested in "loopInd" have the same head and entry as "loopInd", // it must be the case that they were do-while's (since "h" fell through to the entry). // The new node "newT" becomes the head of such loops. for (unsigned char childLoop = optLoopTable[loopInd].lpChild; // childLoop != BasicBlock::NOT_IN_LOOP; // childLoop = optLoopTable[childLoop].lpSibling) { if (optLoopTable[childLoop].lpEntry == origE && optLoopTable[childLoop].lpHead == h && newT->bbJumpKind == BBJ_NONE && newT->bbNext == origE) { optUpdateLoopHead(childLoop, h, newT); } } return true; } //----------------------------------------------------------------------------- // optLoopContains: Check if one loop contains another // // Arguments: // l1 -- loop num of containing loop (must be valid loop num) // l2 -- loop num of contained loop (valid loop num, or NOT_IN_LOOP) // // Returns: // True if loop described by l2 is contained within l1. // // Notes: // A loop contains itself. 
// bool Compiler::optLoopContains(unsigned l1, unsigned l2) const { assert(l1 < optLoopCount); assert((l2 < optLoopCount) || (l2 == BasicBlock::NOT_IN_LOOP)); if (l1 == l2) { return true; } else if (l2 == BasicBlock::NOT_IN_LOOP) { return false; } else { return optLoopContains(l1, optLoopTable[l2].lpParent); } } void Compiler::optUpdateLoopHead(unsigned loopInd, BasicBlock* from, BasicBlock* to) { assert(optLoopTable[loopInd].lpHead == from); optLoopTable[loopInd].lpHead = to; for (unsigned char childLoop = optLoopTable[loopInd].lpChild; childLoop != BasicBlock::NOT_IN_LOOP; childLoop = optLoopTable[childLoop].lpSibling) { if (optLoopTable[childLoop].lpHead == from) { optUpdateLoopHead(childLoop, from, to); } } } //----------------------------------------------------------------------------- // optIterSmallOverflow: Helper for loop unrolling. Determine if "i += const" will // cause an overflow exception for the small types. // // Arguments: // iterAtExit - iteration constant at loop exit // incrType - type of increment // // Returns: // true if overflow // // static bool Compiler::optIterSmallOverflow(int iterAtExit, var_types incrType) { int type_MAX; switch (incrType) { case TYP_BYTE: type_MAX = SCHAR_MAX; break; case TYP_UBYTE: type_MAX = UCHAR_MAX; break; case TYP_SHORT: type_MAX = SHRT_MAX; break; case TYP_USHORT: type_MAX = USHRT_MAX; break; case TYP_UINT: // Detected by checking for 32bit .... case TYP_INT: return false; // ... overflow same as done for TYP_INT default: NO_WAY("Bad type"); } if (iterAtExit > type_MAX) { return true; } else { return false; } } //----------------------------------------------------------------------------- // optIterSmallUnderflow: Helper for loop unrolling. Determine if "i -= const" will // cause an underflow exception for the small types. // // Arguments: // iterAtExit - iteration constant at loop exit // decrType - type of decrement // // Returns: // true if overflow // // static bool Compiler::optIterSmallUnderflow(int iterAtExit, var_types decrType) { int type_MIN; switch (decrType) { case TYP_BYTE: type_MIN = SCHAR_MIN; break; case TYP_SHORT: type_MIN = SHRT_MIN; break; case TYP_UBYTE: type_MIN = 0; break; case TYP_USHORT: type_MIN = 0; break; case TYP_UINT: // Detected by checking for 32bit .... case TYP_INT: return false; // ... underflow same as done for TYP_INT default: NO_WAY("Bad type"); } if (iterAtExit < type_MIN) { return true; } else { return false; } } //----------------------------------------------------------------------------- // optComputeLoopRep: Helper for loop unrolling. Computes the number of repetitions // in a constant loop. // // Arguments: // constInit - loop constant initial value // constLimit - loop constant limit // iterInc - loop iteration increment // iterOper - loop iteration increment operator (ADD, SUB, etc.) // iterOperType - iteration operator type // testOper - type of loop test (i.e. GT_LE, GT_GE, etc.) 
// unsTest - true if test is unsigned // dupCond - true if the loop head contains a test which skips this loop // iterCount - *iterCount is set to the iteration count, if the function returns `true` // // Returns: // true if the loop has a constant repetition count, false if that cannot be proven // bool Compiler::optComputeLoopRep(int constInit, int constLimit, int iterInc, genTreeOps iterOper, var_types iterOperType, genTreeOps testOper, bool unsTest, bool dupCond, unsigned* iterCount) { noway_assert(genActualType(iterOperType) == TYP_INT); __int64 constInitX; __int64 constLimitX; unsigned loopCount; int iterSign; // Using this, we can just do a signed comparison with other 32 bit values. if (unsTest) { constLimitX = (unsigned int)constLimit; } else { constLimitX = (signed int)constLimit; } switch (iterOperType) { // For small types, the iteration operator will narrow these values if big #define INIT_ITER_BY_TYPE(type) \ constInitX = (type)constInit; \ iterInc = (type)iterInc; case TYP_BYTE: INIT_ITER_BY_TYPE(signed char); break; case TYP_UBYTE: INIT_ITER_BY_TYPE(unsigned char); break; case TYP_SHORT: INIT_ITER_BY_TYPE(signed short); break; case TYP_USHORT: INIT_ITER_BY_TYPE(unsigned short); break; // For the big types, 32 bit arithmetic is performed case TYP_INT: case TYP_UINT: if (unsTest) { constInitX = (unsigned int)constInit; } else { constInitX = (signed int)constInit; } break; default: noway_assert(!"Bad type"); NO_WAY("Bad type"); } // If iterInc is zero we have an infinite loop. if (iterInc == 0) { return false; } // Set iterSign to +1 for positive iterInc and -1 for negative iterInc. iterSign = (iterInc > 0) ? +1 : -1; // Initialize loopCount to zero. loopCount = 0; // If dupCond is true then the loop head contains a test which skips // this loop, if the constInit does not pass the loop test. // Such a loop can execute zero times. // If dupCond is false then we have a true do-while loop which we // always execute the loop once before performing the loop test if (!dupCond) { loopCount += 1; constInitX += iterInc; } // bail if count is based on wrap-around math if (iterInc > 0) { if (constLimitX < constInitX) { return false; } } else if (constLimitX > constInitX) { return false; } // Compute the number of repetitions. switch (testOper) { __int64 iterAtExitX; case GT_EQ: // Something like "for (i=init; i == lim; i++)" doesn't make any sense. return false; case GT_NE: // Consider: "for (i = init; i != lim; i += const)" // This is tricky since it may have a constant number of iterations or loop forever. // We have to compute "(lim - init) mod iterInc" to see if it is zero. // If "mod iterInc" is not zero then the limit test will miss and a wrap will occur // which is probably not what the end user wanted, but it is legal. if (iterInc > 0) { // Stepping by one, i.e. Mod with 1 is always zero. if (iterInc != 1) { if (((constLimitX - constInitX) % iterInc) != 0) { return false; } } } else { noway_assert(iterInc < 0); // Stepping by -1, i.e. Mod with 1 is always zero. 
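                // For example, an increment of -3 with init == 10 and limit == 0 never makes
                // the iterator equal the limit exactly ((10 - 0) % 3 != 0), so no constant
                // trip count exists and we bail out below.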
if (iterInc != -1) { if (((constInitX - constLimitX) % (-iterInc)) != 0) { return false; } } } switch (iterOper) { case GT_SUB: iterInc = -iterInc; FALLTHROUGH; case GT_ADD: if (constInitX != constLimitX) { loopCount += (unsigned)((constLimitX - constInitX - iterSign) / iterInc) + 1; } iterAtExitX = (int)(constInitX + iterInc * (int)loopCount); if (unsTest) { iterAtExitX = (unsigned)iterAtExitX; } // Check if iteration incr will cause overflow for small types if (optIterSmallOverflow((int)iterAtExitX, iterOperType)) { return false; } // iterator with 32bit overflow. Bad for TYP_(U)INT if (iterAtExitX < constLimitX) { return false; } *iterCount = loopCount; return true; case GT_MUL: case GT_DIV: case GT_RSH: case GT_LSH: case GT_UDIV: return false; default: noway_assert(!"Unknown operator for loop iterator"); return false; } case GT_LT: switch (iterOper) { case GT_SUB: iterInc = -iterInc; FALLTHROUGH; case GT_ADD: if (constInitX < constLimitX) { loopCount += (unsigned)((constLimitX - constInitX - iterSign) / iterInc) + 1; } iterAtExitX = (int)(constInitX + iterInc * (int)loopCount); if (unsTest) { iterAtExitX = (unsigned)iterAtExitX; } // Check if iteration incr will cause overflow for small types if (optIterSmallOverflow((int)iterAtExitX, iterOperType)) { return false; } // iterator with 32bit overflow. Bad for TYP_(U)INT if (iterAtExitX < constLimitX) { return false; } *iterCount = loopCount; return true; case GT_MUL: case GT_DIV: case GT_RSH: case GT_LSH: case GT_UDIV: return false; default: noway_assert(!"Unknown operator for loop iterator"); return false; } case GT_LE: switch (iterOper) { case GT_SUB: iterInc = -iterInc; FALLTHROUGH; case GT_ADD: if (constInitX <= constLimitX) { loopCount += (unsigned)((constLimitX - constInitX) / iterInc) + 1; } iterAtExitX = (int)(constInitX + iterInc * (int)loopCount); if (unsTest) { iterAtExitX = (unsigned)iterAtExitX; } // Check if iteration incr will cause overflow for small types if (optIterSmallOverflow((int)iterAtExitX, iterOperType)) { return false; } // iterator with 32bit overflow. Bad for TYP_(U)INT if (iterAtExitX <= constLimitX) { return false; } *iterCount = loopCount; return true; case GT_MUL: case GT_DIV: case GT_RSH: case GT_LSH: case GT_UDIV: return false; default: noway_assert(!"Unknown operator for loop iterator"); return false; } case GT_GT: switch (iterOper) { case GT_SUB: iterInc = -iterInc; FALLTHROUGH; case GT_ADD: if (constInitX > constLimitX) { loopCount += (unsigned)((constLimitX - constInitX - iterSign) / iterInc) + 1; } iterAtExitX = (int)(constInitX + iterInc * (int)loopCount); if (unsTest) { iterAtExitX = (unsigned)iterAtExitX; } // Check if small types will underflow if (optIterSmallUnderflow((int)iterAtExitX, iterOperType)) { return false; } // iterator with 32bit underflow. Bad for TYP_INT and unsigneds if (iterAtExitX > constLimitX) { return false; } *iterCount = loopCount; return true; case GT_MUL: case GT_DIV: case GT_RSH: case GT_LSH: case GT_UDIV: return false; default: noway_assert(!"Unknown operator for loop iterator"); return false; } case GT_GE: switch (iterOper) { case GT_SUB: iterInc = -iterInc; FALLTHROUGH; case GT_ADD: if (constInitX >= constLimitX) { loopCount += (unsigned)((constLimitX - constInitX) / iterInc) + 1; } iterAtExitX = (int)(constInitX + iterInc * (int)loopCount); if (unsTest) { iterAtExitX = (unsigned)iterAtExitX; } // Check if small types will underflow if (optIterSmallUnderflow((int)iterAtExitX, iterOperType)) { return false; } // iterator with 32bit underflow. 
Bad for TYP_INT and unsigneds if (iterAtExitX >= constLimitX) { return false; } *iterCount = loopCount; return true; case GT_MUL: case GT_DIV: case GT_RSH: case GT_LSH: case GT_UDIV: return false; default: noway_assert(!"Unknown operator for loop iterator"); return false; } default: noway_assert(!"Unknown operator for loop condition"); } return false; } #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif //----------------------------------------------------------------------------- // optUnrollLoops: Look for loop unrolling candidates and unroll them. // // Loops must be of the form: // for (i=icon; i<icon; i++) { ... } // // Loops handled are fully unrolled; there is no partial unrolling. // // Limitations: only the following loop types are handled: // 1. "while" loops (top entry) // 2. constant initializer, constant bound // 3. The entire loop must be in the same EH region. // 4. The loop iteration variable can't be address exposed. // 5. The loop iteration variable can't be a promoted struct field. // 6. We must be able to calculate the total constant iteration count. // 7. On x86, there is a limit to the number of return blocks. So if there are return blocks in the loop that // would be unrolled, the unrolled code can't exceed that limit. // // Cost heuristics: // 1. there are cost metrics for maximum number of allowed iterations, and maximum unroll size // 2. single-iteration loops are always allowed (to eliminate the loop structure). // 3. otherwise, only loops where the limit is Vector<T>.Length are currently allowed // // In stress modes, these heuristic limits are expanded, and loops aren't required to have the // Vector<T>.Length limit. // // Loops are processed from innermost to outermost order, to attempt to unroll the most nested loops first. // // Returns: // suitable phase status // PhaseStatus Compiler::optUnrollLoops() { if (compCodeOpt() == SMALL_CODE) { return PhaseStatus::MODIFIED_NOTHING; } if (optLoopCount == 0) { return PhaseStatus::MODIFIED_NOTHING; } #ifdef DEBUG if (JitConfig.JitNoUnroll()) { return PhaseStatus::MODIFIED_NOTHING; } #endif #ifdef DEBUG if (verbose) { printf("*************** In optUnrollLoops()\n"); } #endif /* Look for loop unrolling candidates */ bool change = false; bool anyNestedLoopsUnrolled = false; INDEBUG(int unrollCount = 0); // count of loops unrolled INDEBUG(int unrollFailures = 0); // count of loops attempted to be unrolled, but failed static const unsigned ITER_LIMIT[COUNT_OPT_CODE + 1] = { 10, // BLENDED_CODE 0, // SMALL_CODE 20, // FAST_CODE 0 // COUNT_OPT_CODE }; assert(ITER_LIMIT[SMALL_CODE] == 0); assert(ITER_LIMIT[COUNT_OPT_CODE] == 0); unsigned iterLimit = ITER_LIMIT[compCodeOpt()]; #ifdef DEBUG if (compStressCompile(STRESS_UNROLL_LOOPS, 50)) { iterLimit *= 10; } #endif static const int UNROLL_LIMIT_SZ[COUNT_OPT_CODE + 1] = { 300, // BLENDED_CODE 0, // SMALL_CODE 600, // FAST_CODE 0 // COUNT_OPT_CODE }; assert(UNROLL_LIMIT_SZ[SMALL_CODE] == 0); assert(UNROLL_LIMIT_SZ[COUNT_OPT_CODE] == 0); // Visit loops from highest to lowest number to visit them in innermost to outermost order. for (unsigned lnum = optLoopCount - 1; lnum != ~0U; --lnum) { // This is necessary due to an apparent analysis limitation since // optLoopCount must be strictly greater than 0 upon entry and lnum // cannot wrap due to the loop termination condition. 
PREFAST_ASSUME(lnum != 0U - 1); LoopDsc& loop = optLoopTable[lnum]; BasicBlock* head; BasicBlock* top; BasicBlock* bottom; BasicBlock* initBlock; bool dupCond; // Does the 'head' block contain a duplicate loop condition (zero trip test)? int lbeg; // initial value for iterator int llim; // limit value for iterator unsigned lvar; // iterator lclVar # int iterInc; // value to increment the iterator genTreeOps iterOper; // type of iterator increment (i.e. ADD, SUB, etc.) var_types iterOperType; // type result of the oper (for overflow instrs) genTreeOps testOper; // type of loop test (i.e. GT_LE, GT_GE, etc.) bool unsTest; // Is the comparison unsigned? unsigned loopRetCount; // number of BBJ_RETURN blocks in loop unsigned totalIter; // total number of iterations in the constant loop const unsigned loopFlags = loop.lpFlags; // Check for required flags: // LPFLG_CONST_INIT - required because this transform only handles full unrolls // LPFLG_CONST_LIMIT - required because this transform only handles full unrolls const unsigned requiredFlags = LPFLG_CONST_INIT | LPFLG_CONST_LIMIT; if ((loopFlags & requiredFlags) != requiredFlags) { // Don't print to the JitDump about this common case. continue; } // Ignore if removed or marked as not unrollable. if (loopFlags & (LPFLG_DONT_UNROLL | LPFLG_REMOVED)) { // Don't print to the JitDump about this common case. continue; } // This transform only handles loops of this form if (!loop.lpIsTopEntry()) { JITDUMP("Failed to unroll loop " FMT_LP ": not top entry\n", lnum); continue; } head = loop.lpHead; noway_assert(head != nullptr); top = loop.lpTop; noway_assert(top != nullptr); bottom = loop.lpBottom; noway_assert(bottom != nullptr); // Get the loop data: // - initial constant // - limit constant // - iterator // - iterator increment // - increment operation type (i.e. ADD, SUB, etc...) // - loop test type (i.e. GT_GE, GT_LT, etc...) initBlock = loop.lpInitBlock; lbeg = loop.lpConstInit; llim = loop.lpConstLimit(); testOper = loop.lpTestOper(); lvar = loop.lpIterVar(); iterInc = loop.lpIterConst(); iterOper = loop.lpIterOper(); iterOperType = loop.lpIterOperType(); unsTest = (loop.lpTestTree->gtFlags & GTF_UNSIGNED) != 0; if (lvaTable[lvar].IsAddressExposed()) { // If the loop iteration variable is address-exposed then bail JITDUMP("Failed to unroll loop " FMT_LP ": V%02u is address exposed\n", lnum, lvar); continue; } if (lvaTable[lvar].lvIsStructField) { // If the loop iteration variable is a promoted field from a struct then bail JITDUMP("Failed to unroll loop " FMT_LP ": V%02u is a promoted struct field\n", lnum, lvar); continue; } // Locate/initialize the increment/test statements. Statement* initStmt = initBlock->lastStmt(); noway_assert((initStmt != nullptr) && (initStmt->GetNextStmt() == nullptr)); Statement* testStmt = bottom->lastStmt(); noway_assert((testStmt != nullptr) && (testStmt->GetNextStmt() == nullptr)); Statement* incrStmt = testStmt->GetPrevStmt(); noway_assert(incrStmt != nullptr); if (initStmt->GetRootNode()->OperIs(GT_JTRUE)) { // Must be a duplicated loop condition. dupCond = true; initStmt = initStmt->GetPrevStmt(); noway_assert(initStmt != nullptr); } else { dupCond = false; } // Find the number of iterations - the function returns false if not a constant number. 
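        // For example, "for (int i = 0; i < 10; i += 2)" has totalIter == 5; a zero step,
        // wrap-around math, or an unsupported iterator/test operator makes the call below
        // return false.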
if (!optComputeLoopRep(lbeg, llim, iterInc, iterOper, iterOperType, testOper, unsTest, dupCond, &totalIter)) { JITDUMP("Failed to unroll loop " FMT_LP ": not a constant iteration count\n", lnum); continue; } // Forget it if there are too many repetitions or not a constant loop. if (totalIter > iterLimit) { JITDUMP("Failed to unroll loop " FMT_LP ": too many iterations (%d > %d) (heuristic)\n", lnum, totalIter, iterLimit); continue; } int unrollLimitSz = UNROLL_LIMIT_SZ[compCodeOpt()]; if (INDEBUG(compStressCompile(STRESS_UNROLL_LOOPS, 50) ||) false) { // In stress mode, quadruple the size limit, and drop // the restriction that loop limit must be vector element count. unrollLimitSz *= 4; } else if (totalIter <= 1) { // No limit for single iteration loops // If there is no iteration (totalIter == 0), we will remove the loop body entirely. unrollLimitSz = INT_MAX; } else if (!(loopFlags & LPFLG_SIMD_LIMIT)) { // Otherwise unroll only if limit is Vector_.Length // (as a heuristic, not for correctness/structural reasons) JITDUMP("Failed to unroll loop " FMT_LP ": constant limit isn't Vector<T>.Length (heuristic)\n", lnum); continue; } GenTree* incr = incrStmt->GetRootNode(); // Don't unroll loops we don't understand. if (incr->gtOper != GT_ASG) { JITDUMP("Failed to unroll loop " FMT_LP ": unknown increment op (%s)\n", lnum, GenTree::OpName(incr->gtOper)); continue; } incr = incr->AsOp()->gtOp2; GenTree* init = initStmt->GetRootNode(); // Make sure everything looks ok. // clang-format off if ((init->gtOper != GT_ASG) || (init->AsOp()->gtOp1->gtOper != GT_LCL_VAR) || (init->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum() != lvar) || (init->AsOp()->gtOp2->gtOper != GT_CNS_INT) || (init->AsOp()->gtOp2->AsIntCon()->gtIconVal != lbeg) || !((incr->gtOper == GT_ADD) || (incr->gtOper == GT_SUB)) || (incr->AsOp()->gtOp1->gtOper != GT_LCL_VAR) || (incr->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum() != lvar) || (incr->AsOp()->gtOp2->gtOper != GT_CNS_INT) || (incr->AsOp()->gtOp2->AsIntCon()->gtIconVal != iterInc) || (testStmt->GetRootNode()->gtOper != GT_JTRUE)) { noway_assert(!"Bad precondition in Compiler::optUnrollLoops()"); continue; } // clang-format on // Heuristic: Estimated cost in code size of the unrolled loop. { ClrSafeInt<unsigned> loopCostSz; // Cost is size of one iteration auto tryIndex = loop.lpTop->bbTryIndex; // Besides calculating the loop cost, also ensure that all loop blocks are within the same EH // region, and count the number of BBJ_RETURN blocks in the loop. loopRetCount = 0; for (BasicBlock* const block : loop.LoopBlocks()) { if (block->bbTryIndex != tryIndex) { // Unrolling would require cloning EH regions JITDUMP("Failed to unroll loop " FMT_LP ": EH constraint\n", lnum); goto DONE_LOOP; } if (block->bbJumpKind == BBJ_RETURN) { ++loopRetCount; } for (Statement* const stmt : block->Statements()) { gtSetStmtInfo(stmt); loopCostSz += stmt->GetCostSz(); } } #ifdef JIT32_GCENCODER if ((totalIter > 0) && (fgReturnCount + loopRetCount * (totalIter - 1) > SET_EPILOGCNT_MAX)) { // Jit32 GC encoder can't report more than SET_EPILOGCNT_MAX epilogs. JITDUMP("Failed to unroll loop " FMT_LP ": GC encoder max epilog constraint\n", lnum); goto DONE_LOOP; } #endif // !JIT32_GCENCODER // Compute the estimated increase in code size for the unrolled loop. ClrSafeInt<unsigned> fixedLoopCostSz(8); ClrSafeInt<int> unrollCostSz = ClrSafeInt<int>(loopCostSz * ClrSafeInt<unsigned>(totalIter)) - ClrSafeInt<int>(loopCostSz + fixedLoopCostSz); // Don't unroll if too much code duplication would result. 
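        // The estimate is (size of one iteration) * totalIter, minus the size of the loop
        // being removed (one iteration plus a small fixed overhead); unrolling proceeds
        // only if this stays within the unrollLimitSz chosen above.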
if (unrollCostSz.IsOverflow() || (unrollCostSz.Value() > unrollLimitSz)) { JITDUMP("Failed to unroll loop " FMT_LP ": size constraint (%d > %d) (heuristic)\n", lnum, unrollCostSz.Value(), unrollLimitSz); goto DONE_LOOP; } // Looks like a good idea to unroll this loop, let's do it! CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (verbose) { printf("\nUnrolling loop "); optPrintLoopInfo(&loop); printf(" over V%02u from %u to %u unrollCostSz = %d\n\n", lvar, lbeg, llim, unrollCostSz); } #endif } #if FEATURE_LOOP_ALIGN for (BasicBlock* const block : loop.LoopBlocks()) { block->unmarkLoopAlign(this DEBUG_ARG("Unrolled loop")); } #endif // Create the unrolled loop statement list. { // When unrolling a loop, that loop disappears (and will be removed from the loop table). Each unrolled // block will be set to exist within the parent loop, if any. However, if we unroll a loop that has // nested loops, we will create multiple copies of the nested loops. This requires adding new loop table // entries to represent the new loops. Instead of trying to do this incrementally, in the case where // nested loops exist (in any unrolled loop) we rebuild the entire loop table after unrolling. BlockToBlockMap blockMap(getAllocator(CMK_LoopOpt)); BasicBlock* insertAfter = bottom; BasicBlock::loopNumber newLoopNum = loop.lpParent; bool anyNestedLoopsUnrolledThisLoop = false; int lval; unsigned iterToUnroll = totalIter; // The number of iterations left to unroll for (lval = lbeg; iterToUnroll > 0; iterToUnroll--) { // Note: we can't use the loop.LoopBlocks() iterator, as it captures loop.lpBottom->bbNext at the // beginning of iteration, and we insert blocks before that. So we need to evaluate lpBottom->bbNext // every iteration. for (BasicBlock* block = loop.lpTop; block != loop.lpBottom->bbNext; block = block->bbNext) { BasicBlock* newBlock = insertAfter = fgNewBBafter(block->bbJumpKind, insertAfter, /*extendRegion*/ true); blockMap.Set(block, newBlock, BlockToBlockMap::Overwrite); if (!BasicBlock::CloneBlockState(this, newBlock, block, lvar, lval)) { // CloneBlockState (specifically, gtCloneExpr) doesn't handle everything. If it fails // to clone a block in the loop, splice out and forget all the blocks we cloned so far: // put the loop blocks back to how they were before we started cloning blocks, // and abort unrolling the loop. BasicBlock* oldBottomNext = insertAfter->bbNext; bottom->bbNext = oldBottomNext; oldBottomNext->bbPrev = bottom; loop.lpFlags |= LPFLG_DONT_UNROLL; // Mark it so we don't try to unroll it again. INDEBUG(++unrollFailures); JITDUMP("Failed to unroll loop " FMT_LP ": block cloning failed on " FMT_BB "\n", lnum, block->bbNum); goto DONE_LOOP; } // All blocks in the unrolled loop will now be marked with the parent loop number. Note that // if the loop being unrolled contains nested (child) loops, we will notice this below (when // we set anyNestedLoopsUnrolledThisLoop), and that will cause us to rebuild the entire loop // table and all loop annotations on blocks. However, if the loop contains no nested loops, // setting the block `bbNatLoopNum` here is sufficient to incrementally update the block's // loop info. newBlock->bbNatLoopNum = newLoopNum; // Block weight should no longer have the loop multiplier // // Note this is not quite right, as we may not have upscaled by this amount // and we might not have upscaled at all, if we had profile data. // newBlock->scaleBBWeight(1.0 / BB_LOOP_WEIGHT_SCALE); // Jump dests are set in a post-pass; make sure CloneBlockState hasn't tried to set them. 
assert(newBlock->bbJumpDest == nullptr); if (block == bottom) { // Remove the test; we're doing a full unroll. Statement* testCopyStmt = newBlock->lastStmt(); GenTree* testCopyExpr = testCopyStmt->GetRootNode(); assert(testCopyExpr->gtOper == GT_JTRUE); GenTree* sideEffList = nullptr; gtExtractSideEffList(testCopyExpr, &sideEffList, GTF_SIDE_EFFECT | GTF_ORDER_SIDEEFF); if (sideEffList == nullptr) { fgRemoveStmt(newBlock, testCopyStmt); } else { testCopyStmt->SetRootNode(sideEffList); } newBlock->bbJumpKind = BBJ_NONE; } } // Now redirect any branches within the newly-cloned iteration. // Don't include `bottom` in the iteration, since we've already changed the // newBlock->bbJumpKind, above. for (BasicBlock* block = loop.lpTop; block != loop.lpBottom; block = block->bbNext) { BasicBlock* newBlock = blockMap[block]; optCopyBlkDest(block, newBlock); optRedirectBlock(newBlock, &blockMap); } /* update the new value for the unrolled iterator */ switch (iterOper) { case GT_ADD: lval += iterInc; break; case GT_SUB: lval -= iterInc; break; case GT_RSH: case GT_LSH: noway_assert(!"Unrolling not implemented for this loop iterator"); goto DONE_LOOP; default: noway_assert(!"Unknown operator for constant loop iterator"); goto DONE_LOOP; } } // If we get here, we successfully cloned all the blocks in the unrolled loop. // Gut the old loop body for (BasicBlock* const block : loop.LoopBlocks()) { // Check if the old loop body had any nested loops that got cloned. Note that we need to do this // here, and not in the loop above, to handle the special case where totalIter is zero, and the // above loop doesn't execute. if (block->bbNatLoopNum != lnum) { anyNestedLoopsUnrolledThisLoop = true; } block->bbStmtList = nullptr; block->bbJumpKind = BBJ_NONE; block->bbFlags &= ~BBF_LOOP_HEAD; block->bbJumpDest = nullptr; block->bbNatLoopNum = newLoopNum; } if (anyNestedLoopsUnrolledThisLoop) { anyNestedLoopsUnrolled = true; } // If the HEAD is a BBJ_COND drop the condition (and make HEAD a BBJ_NONE block). if (head->bbJumpKind == BBJ_COND) { testStmt = head->lastStmt(); noway_assert(testStmt->GetRootNode()->gtOper == GT_JTRUE); fgRemoveStmt(head, testStmt); head->bbJumpKind = BBJ_NONE; } else { /* the loop must execute */ noway_assert(head->bbJumpKind == BBJ_NONE); } #ifdef DEBUG if (verbose) { printf("Whole unrolled loop:\n"); gtDispTree(initStmt->GetRootNode()); printf("\n"); fgDumpTrees(top, insertAfter); if (anyNestedLoopsUnrolledThisLoop) { printf("Unrolled loop " FMT_LP " contains nested loops\n", lnum); } } #endif // DEBUG // Update loop table. optMarkLoopRemoved(lnum); // Note if we created new BBJ_RETURNs (or removed some). if (totalIter > 0) { fgReturnCount += loopRetCount * (totalIter - 1); } else { assert(totalIter == 0); assert(fgReturnCount >= loopRetCount); fgReturnCount -= loopRetCount; } // Remember that something has changed. INDEBUG(++unrollCount); change = true; } DONE_LOOP:; } if (change) { #ifdef DEBUG if (verbose) { printf("\nFinished unrolling %d loops", unrollCount); if (unrollFailures > 0) { printf(", %d failures due to block cloning", unrollFailures); } printf("\n"); if (anyNestedLoopsUnrolled) { printf("At least one unrolled loop contains nested loops; recomputing loop table\n"); } } #endif // DEBUG // If we unrolled any nested loops, we rebuild the loop table (including recomputing the // return blocks list). 
constexpr bool computePreds = true; constexpr bool computeDoms = true; const bool computeReturnBlocks = anyNestedLoopsUnrolled; const bool computeLoops = anyNestedLoopsUnrolled; fgUpdateChangedFlowGraph(computePreds, computeDoms, computeReturnBlocks, computeLoops); DBEXEC(verbose, fgDispBasicBlocks()); } else { #ifdef DEBUG assert(unrollCount == 0); assert(!anyNestedLoopsUnrolled); if (unrollFailures > 0) { printf("\nFinished loop unrolling, %d failures due to block cloning\n", unrollFailures); } #endif // DEBUG } #ifdef DEBUG fgDebugCheckBBlist(true); fgDebugCheckLoopTable(); #endif // DEBUG return PhaseStatus::MODIFIED_EVERYTHING; } #ifdef _PREFAST_ #pragma warning(pop) #endif /***************************************************************************** * * Return false if there is a code path from 'topBB' to 'botBB' that might * not execute a method call. */ bool Compiler::optReachWithoutCall(BasicBlock* topBB, BasicBlock* botBB) { // TODO-Cleanup: Currently BBF_GC_SAFE_POINT is not set for helper calls, // as some helper calls are neither interruptible nor hijackable. // When we can determine this, then we can set BBF_GC_SAFE_POINT for // those helpers too. noway_assert(topBB->bbNum <= botBB->bbNum); // We can always check topBB and botBB for any gc safe points and early out if ((topBB->bbFlags | botBB->bbFlags) & BBF_GC_SAFE_POINT) { return false; } // Otherwise we will need to rely upon the dominator sets if (!fgDomsComputed) { // return a conservative answer of true when we don't have the dominator sets return true; } BasicBlock* curBB = topBB; for (;;) { noway_assert(curBB); // If we added a loop pre-header block then we will // have a bbNum greater than fgLastBB, and we won't have // any dominator information about this block, so skip it. // if (curBB->bbNum <= fgLastBB->bbNum) { noway_assert(curBB->bbNum <= botBB->bbNum); // Does this block contain a gc safe point? if (curBB->bbFlags & BBF_GC_SAFE_POINT) { // Will this block always execute on the way to botBB ? // // Since we are checking every block in [topBB .. botBB] and we are using // a lexical definition of a loop. 
                // (all that we know is that botBB is a back-edge to topBB)
                // Thus while walking blocks in this range we may encounter some blocks
                // that are not really part of the loop, and so we need to perform
                // some additional checks:
                //
                // We will check that the current 'curBB' is reachable from 'topBB'
                // and that it dominates the block containing the back-edge 'botBB'
                // When both of these are true then we know that the gcsafe point in 'curBB'
                // will be encountered in the loop and we can return false
                //
                if (fgDominate(curBB, botBB) && fgReachable(topBB, curBB))
                {
                    return false;
                }
            }
            else
            {
                // If we've reached the destination block, then we're done
                if (curBB == botBB)
                {
                    break;
                }
            }
        }

        curBB = curBB->bbNext;
    }

    // If we didn't find any blocks that contained a gc safe point and
    // also met the fgDominate and fgReachable criteria then we must return true
    //
    return true;
}

// static
Compiler::fgWalkResult Compiler::optInvertCountTreeInfo(GenTree** pTree, fgWalkData* data)
{
    OptInvertCountTreeInfoType* o = (OptInvertCountTreeInfoType*)data->pCallbackData;

    if (Compiler::IsSharedStaticHelper(*pTree))
    {
        o->sharedStaticHelperCount += 1;
    }

    if ((*pTree)->OperGet() == GT_ARR_LENGTH)
    {
        o->arrayLengthCount += 1;
    }

    return WALK_CONTINUE;
}

//-----------------------------------------------------------------------------
// optInvertWhileLoop: modify flow and duplicate code so that for/while loops are
//   entered at top and tested at bottom (aka loop rotation or bottom testing).
//   Creates a "zero trip test" condition which guards entry to the loop.
//   Enables loop invariant hoisting and loop cloning, which depend on
//   `do {} while` format loops. Enables creation of a pre-header block after the
//   zero trip test to place code that only runs if the loop is guaranteed to
//   run at least once.
//
// Arguments:
//   block -- block that may be the predecessor of the un-rotated loop's test block.
//
// Returns:
//   true if any IR changes possibly made (used to determine phase return status)
//
// Notes:
//   Uses a simple lexical screen to detect likely loops.
//
//   Specifically, we're looking for the following case:
//
//          ...
//          jmp test                // `block` argument
//      loop:
//          ...
//          ...
//      test:
//          ..stmts..
//          cond
//          jtrue loop
//
//   If we find this, and the condition is simple enough, we change
//   the loop to the following:
//
//          ...
//          ..stmts..               // duplicated cond block statements
//          cond                    // duplicated cond
//          jfalse done
//          // else fall-through
//      loop:
//          ...
//          ...
//      test:
//          ..stmts..
//          cond
//          jtrue loop
//      done:
//
//   Makes no changes if the flow pattern match fails.
//
//   May not modify a loop if profile is unfavorable, if the cost of duplicating
//   code is large (factoring in potential CSEs).
//
bool Compiler::optInvertWhileLoop(BasicBlock* block)
{
    assert(opts.OptimizationEnabled());
    assert(compCodeOpt() != SMALL_CODE);

    // Does the BB end with an unconditional jump?

    if (block->bbJumpKind != BBJ_ALWAYS || (block->bbFlags & BBF_KEEP_BBJ_ALWAYS))
    {
        // It can't be one of the ones we use for our exception magic
        return false;
    }

    // Get hold of the jump target
    BasicBlock* bTest = block->bbJumpDest;

    // Does the block consist of 'jtrue(cond) block' ?
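    // (At the source level, the shape being matched here typically comes from a simple
    //  `while` loop; illustrative only, not tied to any particular test case:
    //
    //      while (cond) { body; }            // imported as: jmp test; loop: body; test: cond; jtrue loop
    //
    //  and after inversion the code behaves like:
    //
    //      if (cond) { do { body; } while (cond); }
    //
    //  so the duplicated, reversed test becomes the "zero trip test" that guards entry to the loop.)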
if (bTest->bbJumpKind != BBJ_COND) { return false; } // bTest must be a backwards jump to block->bbNext if (bTest->bbJumpDest != block->bbNext) { return false; } // Since test is a BBJ_COND it will have a bbNext noway_assert(bTest->bbNext != nullptr); // 'block' must be in the same try region as the condition, since we're going to insert a duplicated condition // in a new block after 'block', and the condition might include exception throwing code. // On non-funclet platforms (x86), the catch exit is a BBJ_ALWAYS, but we don't want that to // be considered as the head of a loop, so also disallow different handler regions. if (!BasicBlock::sameEHRegion(block, bTest)) { return false; } // The duplicated condition block will branch to bTest->bbNext, so that also better be in the // same try region (or no try region) to avoid generating illegal flow. BasicBlock* bTestNext = bTest->bbNext; if (bTestNext->hasTryIndex() && !BasicBlock::sameTryRegion(block, bTestNext)) { return false; } // It has to be a forward jump. Defer this check until after all the cheap checks // are done, since it iterates forward in the block list looking for bbJumpDest. // TODO-CQ: Check if we can also optimize the backwards jump as well. // if (!fgIsForwardBranch(block)) { return false; } // Find the loop termination test at the bottom of the loop. Statement* condStmt = bTest->lastStmt(); // Verify the test block ends with a conditional that we can manipulate. GenTree* const condTree = condStmt->GetRootNode(); noway_assert(condTree->gtOper == GT_JTRUE); if (!condTree->AsOp()->gtOp1->OperIsCompare()) { return false; } // Estimate the cost of cloning the entire test block. // // Note: it would help throughput to compute the maximum cost // first and early out for large bTest blocks, as we are doing two // tree walks per tree. But because of this helper call scan, the // maximum cost depends on the trees in the block. // // We might consider flagging blocks with hoistable helper calls // during importation, so we can avoid the helper search and // implement an early bail out for large blocks with no helper calls. // // Note that gtPrepareCost can cause operand swapping, so we must // return `true` (possible IR change) from here on. unsigned estDupCostSz = 0; for (Statement* const stmt : bTest->Statements()) { GenTree* tree = stmt->GetRootNode(); gtPrepareCost(tree); estDupCostSz += tree->GetCostSz(); } weight_t loopIterations = BB_LOOP_WEIGHT_SCALE; bool allProfileWeightsAreValid = false; weight_t const weightBlock = block->bbWeight; weight_t const weightTest = bTest->bbWeight; weight_t const weightNext = block->bbNext->bbWeight; // If we have profile data then we calculate the number of times // the loop will iterate into loopIterations if (fgIsUsingProfileWeights()) { // Only rely upon the profile weight when all three of these blocks // have good profile weights if (block->hasProfileWeight() && bTest->hasProfileWeight() && block->bbNext->hasProfileWeight()) { // If this while loop never iterates then don't bother transforming // if (weightNext == BB_ZERO_WEIGHT) { return true; } // We generally expect weightTest == weightNext + weightBlock. // // Tolerate small inconsistencies... 
// if (!fgProfileWeightsConsistent(weightBlock + weightNext, weightTest)) { JITDUMP("Profile weights locally inconsistent: block " FMT_WT ", next " FMT_WT ", test " FMT_WT "\n", weightBlock, weightNext, weightTest); } else { allProfileWeightsAreValid = true; // Determine iteration count // // weightNext is the number of time this loop iterates // weightBlock is the number of times that we enter the while loop // loopIterations is the average number of times that this loop iterates // loopIterations = weightNext / weightBlock; } } else { JITDUMP("Missing profile data for loop!\n"); } } unsigned maxDupCostSz = 34; if ((compCodeOpt() == FAST_CODE) || compStressCompile(STRESS_DO_WHILE_LOOPS, 30)) { maxDupCostSz *= 4; } // If this loop iterates a lot then raise the maxDupCost if (loopIterations >= 12.0) { maxDupCostSz *= 2; if (loopIterations >= 96.0) { maxDupCostSz *= 2; } } // If the compare has too high cost then we don't want to dup. bool costIsTooHigh = (estDupCostSz > maxDupCostSz); OptInvertCountTreeInfoType optInvertTotalInfo = {}; if (costIsTooHigh) { // If we already know that the cost is acceptable, then don't waste time walking the tree // counting things to boost the maximum allowed cost. // // If the loop condition has a shared static helper, we really want this loop converted // as not converting the loop will disable loop hoisting, meaning the shared helper will // be executed on every loop iteration. // // If the condition has array.Length operations, also boost, as they are likely to be CSE'd. for (Statement* const stmt : bTest->Statements()) { GenTree* tree = stmt->GetRootNode(); OptInvertCountTreeInfoType optInvertInfo = {}; fgWalkTreePre(&tree, Compiler::optInvertCountTreeInfo, &optInvertInfo); optInvertTotalInfo.sharedStaticHelperCount += optInvertInfo.sharedStaticHelperCount; optInvertTotalInfo.arrayLengthCount += optInvertInfo.arrayLengthCount; if ((optInvertInfo.sharedStaticHelperCount > 0) || (optInvertInfo.arrayLengthCount > 0)) { // Calculate a new maximum cost. We might be able to early exit. unsigned newMaxDupCostSz = maxDupCostSz + 24 * min(optInvertTotalInfo.sharedStaticHelperCount, (int)(loopIterations + 1.5)) + 8 * optInvertTotalInfo.arrayLengthCount; // Is the cost too high now? costIsTooHigh = (estDupCostSz > newMaxDupCostSz); if (!costIsTooHigh) { // No need counting any more trees; we're going to do the transformation. JITDUMP("Decided to duplicate loop condition block after counting helpers in tree [%06u] in " "block " FMT_BB, dspTreeID(tree), bTest->bbNum); maxDupCostSz = newMaxDupCostSz; // for the JitDump output below break; } } } } #ifdef DEBUG if (verbose) { // Note that `optInvertTotalInfo.sharedStaticHelperCount = 0` means either there were zero helpers, or the // tree walk to count them was not done. printf( "\nDuplication of loop condition [%06u] is %s, because the cost of duplication (%i) is %s than %i," "\n loopIterations = %7.3f, optInvertTotalInfo.sharedStaticHelperCount >= %d, validProfileWeights = %s\n", dspTreeID(condTree), costIsTooHigh ? "not done" : "performed", estDupCostSz, costIsTooHigh ? "greater" : "less or equal", maxDupCostSz, loopIterations, optInvertTotalInfo.sharedStaticHelperCount, dspBool(allProfileWeightsAreValid)); } #endif if (costIsTooHigh) { return true; } bool foundCondTree = false; // Create a new block after `block` to put the copied condition code. 
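    // (Roughly, the rewiring performed below turns
    //      block:    BBJ_ALWAYS -> bTest
    //  into
    //      block:    BBJ_NONE, falling into bNewCond
    //      bNewCond: BBJ_COND with a reversed copy of bTest's condition, jumping to
    //                bTest->bbNext (the block after the loop) and otherwise falling
    //                through into the loop top.
    //  Sketch for orientation only; the authoritative steps are the ones below.)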
block->bbJumpKind = BBJ_NONE; block->bbJumpDest = nullptr; BasicBlock* bNewCond = fgNewBBafter(BBJ_COND, block, /*extendRegion*/ true); // Clone each statement in bTest and append to bNewCond. for (Statement* const stmt : bTest->Statements()) { GenTree* originalTree = stmt->GetRootNode(); GenTree* clonedTree = gtCloneExpr(originalTree); // Special case handling needed for the conditional jump tree if (originalTree == condTree) { foundCondTree = true; // Get the compare subtrees GenTree* originalCompareTree = originalTree->AsOp()->gtOp1; GenTree* clonedCompareTree = clonedTree->AsOp()->gtOp1; assert(originalCompareTree->OperIsCompare()); assert(clonedCompareTree->OperIsCompare()); // Flag compare and cloned copy so later we know this loop // has a proper zero trip test. originalCompareTree->gtFlags |= GTF_RELOP_ZTT; clonedCompareTree->gtFlags |= GTF_RELOP_ZTT; // The original test branches to remain in the loop. The // new cloned test will branch to avoid the loop. So the // cloned compare needs to reverse the branch condition. gtReverseCond(clonedCompareTree); } Statement* clonedStmt = fgNewStmtAtEnd(bNewCond, clonedTree); if (opts.compDbgInfo) { clonedStmt->SetDebugInfo(stmt->GetDebugInfo()); } } assert(foundCondTree); // Flag the block that received the copy as potentially having an array/vtable // reference, nullcheck, object/array allocation if the block copied from did; // this is a conservative guess. if (auto copyFlags = bTest->bbFlags & (BBF_HAS_IDX_LEN | BBF_HAS_NULLCHECK | BBF_HAS_NEWOBJ | BBF_HAS_NEWARRAY)) { bNewCond->bbFlags |= copyFlags; } bNewCond->bbJumpDest = bTest->bbNext; bNewCond->inheritWeight(block); // Update bbRefs and bbPreds for 'bNewCond', 'bNewCond->bbNext' 'bTest' and 'bTest->bbNext'. fgAddRefPred(bNewCond, block); fgAddRefPred(bNewCond->bbNext, bNewCond); fgRemoveRefPred(bTest, block); fgAddRefPred(bTest->bbNext, bNewCond); // Move all predecessor edges that look like loop entry edges to point to the new cloned condition // block, not the existing condition block. The idea is that if we only move `block` to point to // `bNewCond`, but leave other `bTest` predecessors still pointing to `bTest`, when we eventually // recognize loops, the loop will appear to have multiple entries, which will prevent optimization. // We don't have loops yet, but blocks should be in increasing lexical numbered order, so use that // as the proxy for predecessors that are "in" versus "out" of the potential loop. Note that correctness // is maintained no matter which condition block we point to, but we'll lose optimization potential // (and create spaghetti code) if we get it wrong. BlockToBlockMap blockMap(getAllocator(CMK_LoopOpt)); bool blockMapInitialized = false; unsigned loopFirstNum = bNewCond->bbNext->bbNum; unsigned loopBottomNum = bTest->bbNum; for (BasicBlock* const predBlock : bTest->PredBlocks()) { unsigned bNum = predBlock->bbNum; if ((loopFirstNum <= bNum) && (bNum <= loopBottomNum)) { // Looks like the predecessor is from within the potential loop; skip it. continue; } if (!blockMapInitialized) { blockMapInitialized = true; blockMap.Set(bTest, bNewCond); } // Redirect the predecessor to the new block. 
JITDUMP("Redirecting non-loop " FMT_BB " -> " FMT_BB " to " FMT_BB " -> " FMT_BB "\n", predBlock->bbNum, bTest->bbNum, predBlock->bbNum, bNewCond->bbNum); optRedirectBlock(predBlock, &blockMap, /*updatePreds*/ true); } // If we have profile data for all blocks and we know that we are cloning the // `bTest` block into `bNewCond` and thus changing the control flow from `block` so // that it no longer goes directly to `bTest` anymore, we have to adjust // various weights. // if (allProfileWeightsAreValid) { // Update the weight for bTest // JITDUMP("Reducing profile weight of " FMT_BB " from " FMT_WT " to " FMT_WT "\n", bTest->bbNum, weightTest, weightNext); bTest->bbWeight = weightNext; // Determine the new edge weights. // // We project the next/jump ratio for block and bTest by using // the original likelihoods out of bTest. // // Note "next" is the loop top block, not bTest's bbNext, // we'll call this latter block "after". // weight_t const testToNextLikelihood = min(1.0, weightNext / weightTest); weight_t const testToAfterLikelihood = 1.0 - testToNextLikelihood; // Adjust edges out of bTest (which now has weight weightNext) // weight_t const testToNextWeight = weightNext * testToNextLikelihood; weight_t const testToAfterWeight = weightNext * testToAfterLikelihood; flowList* const edgeTestToNext = fgGetPredForBlock(bTest->bbJumpDest, bTest); flowList* const edgeTestToAfter = fgGetPredForBlock(bTest->bbNext, bTest); JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (iterate loop)\n", bTest->bbNum, bTest->bbJumpDest->bbNum, testToNextWeight); JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (exit loop)\n", bTest->bbNum, bTest->bbNext->bbNum, testToAfterWeight); edgeTestToNext->setEdgeWeights(testToNextWeight, testToNextWeight, bTest->bbJumpDest); edgeTestToAfter->setEdgeWeights(testToAfterWeight, testToAfterWeight, bTest->bbNext); // Adjust edges out of block, using the same distribution. // JITDUMP("Profile weight of " FMT_BB " remains unchanged at " FMT_WT "\n", block->bbNum, weightBlock); weight_t const blockToNextLikelihood = testToNextLikelihood; weight_t const blockToAfterLikelihood = testToAfterLikelihood; weight_t const blockToNextWeight = weightBlock * blockToNextLikelihood; weight_t const blockToAfterWeight = weightBlock * blockToAfterLikelihood; flowList* const edgeBlockToNext = fgGetPredForBlock(bNewCond->bbNext, bNewCond); flowList* const edgeBlockToAfter = fgGetPredForBlock(bNewCond->bbJumpDest, bNewCond); JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (enter loop)\n", bNewCond->bbNum, bNewCond->bbNext->bbNum, blockToNextWeight); JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (avoid loop)\n", bNewCond->bbNum, bNewCond->bbJumpDest->bbNum, blockToAfterWeight); edgeBlockToNext->setEdgeWeights(blockToNextWeight, blockToNextWeight, bNewCond->bbNext); edgeBlockToAfter->setEdgeWeights(blockToAfterWeight, blockToAfterWeight, bNewCond->bbJumpDest); #ifdef DEBUG // Verify profile for the two target blocks is consistent. 
// fgDebugCheckIncomingProfileData(bNewCond->bbNext); fgDebugCheckIncomingProfileData(bNewCond->bbJumpDest); #endif // DEBUG } #ifdef DEBUG if (verbose) { printf("\nDuplicated loop exit block at " FMT_BB " for loop (" FMT_BB " - " FMT_BB ")\n", bNewCond->bbNum, bNewCond->bbNext->bbNum, bTest->bbNum); printf("Estimated code size expansion is %d\n", estDupCostSz); fgDumpBlock(bNewCond); fgDumpBlock(bTest); } #endif // DEBUG return true; } //----------------------------------------------------------------------------- // optInvertLoops: invert while loops in the method // // Returns: // suitable phase status // PhaseStatus Compiler::optInvertLoops() { noway_assert(opts.OptimizationEnabled()); noway_assert(fgModified == false); #if defined(OPT_CONFIG) if (!JitConfig.JitDoLoopInversion()) { JITDUMP("Loop inversion disabled\n"); return PhaseStatus::MODIFIED_NOTHING; } #endif // OPT_CONFIG if (compCodeOpt() == SMALL_CODE) { return PhaseStatus::MODIFIED_NOTHING; } bool madeChanges = false; // Assume no changes made for (BasicBlock* const block : Blocks()) { // Make sure the appropriate fields are initialized // if (block->bbWeight == BB_ZERO_WEIGHT) { // Zero weighted block can't have a LOOP_HEAD flag noway_assert(block->isLoopHead() == false); continue; } if (optInvertWhileLoop(block)) { madeChanges = true; } } if (fgModified) { // Reset fgModified here as we've done a consistent set of edits. // fgModified = false; } return madeChanges ? PhaseStatus::MODIFIED_EVERYTHING : PhaseStatus::MODIFIED_NOTHING; } //----------------------------------------------------------------------------- // optOptimizeLayout: reorder blocks to reduce cost of control flow // // Returns: // suitable phase status // PhaseStatus Compiler::optOptimizeLayout() { noway_assert(opts.OptimizationEnabled()); noway_assert(fgModified == false); bool madeChanges = false; const bool allowTailDuplication = true; madeChanges |= fgUpdateFlowGraph(allowTailDuplication); madeChanges |= fgReorderBlocks(); madeChanges |= fgUpdateFlowGraph(); // fgReorderBlocks can cause IR changes even if it does not modify // the flow graph. It calls gtPrepareCost which can cause operand swapping. // Work around this for now. // // Note phase status only impacts dumping and checking done post-phase, // it has no impact on a release build. // madeChanges = true; return madeChanges ? PhaseStatus::MODIFIED_EVERYTHING : PhaseStatus::MODIFIED_NOTHING; } //------------------------------------------------------------------------ // optMarkLoopHeads: Mark all potential loop heads as BBF_LOOP_HEAD. A potential loop head is a block // targeted by a lexical back edge, where the source of the back edge is reachable from the block. // Note that if there are no lexical back edges, there can't be any loops. // // If there are any potential loop heads, set `fgHasLoops` to `true`. // // Assumptions: // The reachability sets must be computed and valid. // void Compiler::optMarkLoopHeads() { #ifdef DEBUG if (verbose) { printf("*************** In optMarkLoopHeads()\n"); } assert(!fgCheapPredsValid); assert(fgReachabilitySetsValid); fgDebugCheckBBNumIncreasing(); int loopHeadsMarked = 0; #endif bool hasLoops = false; for (BasicBlock* const block : Blocks()) { // Set BBF_LOOP_HEAD if we have backwards branches to this block. unsigned blockNum = block->bbNum; for (BasicBlock* const predBlock : block->PredBlocks()) { if (blockNum <= predBlock->bbNum) { if (predBlock->bbJumpKind == BBJ_CALLFINALLY) { // Loops never have BBJ_CALLFINALLY as the source of their "back edge". 
continue; } // If block can reach predBlock then we have a loop head if (BlockSetOps::IsMember(this, predBlock->bbReach, blockNum)) { hasLoops = true; block->bbFlags |= BBF_LOOP_HEAD; INDEBUG(++loopHeadsMarked); break; // No need to look at more `block` predecessors } } } } JITDUMP("%d loop heads marked\n", loopHeadsMarked); fgHasLoops = hasLoops; } //----------------------------------------------------------------------------- // optResetLoopInfo: reset all loop info in preparation for rebuilding the loop table, or preventing // future phases from accessing loop-related data. // void Compiler::optResetLoopInfo() { #ifdef DEBUG if (verbose) { printf("*************** In optResetLoopInfo()\n"); } #endif optLoopCount = 0; // This will force the table to be rebuilt loopAlignCandidates = 0; // This will cause users to crash if they use the table when it is considered empty. // TODO: the loop table is always allocated as the same (maximum) size, so this is wasteful. // We could zero it out (possibly only in DEBUG) to be paranoid, but there's no reason to // force it to be re-allocated. optLoopTable = nullptr; for (BasicBlock* const block : Blocks()) { // If the block weight didn't come from profile data, reset it so it can be calculated again. if (!block->hasProfileWeight()) { block->bbWeight = BB_UNITY_WEIGHT; block->bbFlags &= ~BBF_RUN_RARELY; } block->bbFlags &= ~BBF_LOOP_FLAGS; block->bbNatLoopNum = BasicBlock::NOT_IN_LOOP; } } //----------------------------------------------------------------------------- // optFindAndScaleGeneralLoopBlocks: scale block weights based on loop nesting depth. // Note that this uses a very general notion of "loop": any block targeted by a reachable // back-edge is considered a loop. // void Compiler::optFindAndScaleGeneralLoopBlocks() { #ifdef DEBUG if (verbose) { printf("*************** In optFindAndScaleGeneralLoopBlocks()\n"); } #endif // This code depends on block number ordering. INDEBUG(fgDebugCheckBBNumIncreasing()); unsigned generalLoopCount = 0; // We will use the following terminology: // top - the first basic block in the loop (i.e. the head of the backward edge) // bottom - the last block in the loop (i.e. the block from which we jump to the top) // lastBottom - used when we have multiple back edges to the same top for (BasicBlock* const top : Blocks()) { // Only consider `top` blocks already determined to be potential loop heads. if (!top->isLoopHead()) { continue; } BasicBlock* foundBottom = nullptr; for (BasicBlock* const bottom : top->PredBlocks()) { // Is this a loop candidate? - We look for "back edges" // Is this a backward edge? (from BOTTOM to TOP) if (top->bbNum > bottom->bbNum) { continue; } // We only consider back-edges that are BBJ_COND or BBJ_ALWAYS for loops. 
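            // (For example, a backward BBJ_COND edge BB07 -> BB03 where BB03 can reach BB07 makes
            //  [BB03 .. BB07] a general loop here, and the blocks in that range get their weights
            //  scaled below; block numbers are illustrative only.)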
if ((bottom->bbJumpKind != BBJ_COND) && (bottom->bbJumpKind != BBJ_ALWAYS)) { continue; } /* the top block must be able to reach the bottom block */ if (!fgReachable(top, bottom)) { continue; } /* Found a new loop, record the longest backedge in foundBottom */ if ((foundBottom == nullptr) || (bottom->bbNum > foundBottom->bbNum)) { foundBottom = bottom; } } if (foundBottom) { generalLoopCount++; /* Mark all blocks between 'top' and 'bottom' */ optScaleLoopBlocks(top, foundBottom); } // We track at most 255 loops if (generalLoopCount == 255) { #if COUNT_LOOPS totalUnnatLoopOverflows++; #endif break; } } JITDUMP("\nFound a total of %d general loops.\n", generalLoopCount); #if COUNT_LOOPS totalUnnatLoopCount += generalLoopCount; #endif } //----------------------------------------------------------------------------- // optFindLoops: find loops in the function. // // The JIT recognizes two types of loops in a function: natural loops and "general" (or "unnatural") loops. // Natural loops are those which get added to the loop table. Most downstream optimizations require // using natural loops. See `optFindNaturalLoops` for a definition of the criteria for recognizing a natural loop. // A general loop is defined as a lexical (program order) range of blocks where a later block branches to an // earlier block (that is, there is a back edge in the flow graph), and the later block is reachable from the earlier // block. General loops are used for weighting flow graph blocks (when there is no block profile data), as well as // for determining if we require fully interruptible GC information. // // Notes: // Also (re)sets all non-IBC block weights, and marks loops potentially needing alignment padding. // void Compiler::optFindLoops() { #ifdef DEBUG if (verbose) { printf("*************** In optFindLoops()\n"); } #endif noway_assert(opts.OptimizationEnabled()); assert(fgDomsComputed); optMarkLoopHeads(); // Were there any potential loops in the flow graph? if (fgHasLoops) { optFindNaturalLoops(); optFindAndScaleGeneralLoopBlocks(); optIdentifyLoopsForAlignment(); // Check if any of the loops need alignment } #ifdef DEBUG fgDebugCheckLoopTable(); #endif optLoopsMarked = true; } //----------------------------------------------------------------------------- // optFindLoopsPhase: The wrapper function for the "find loops" phase. // PhaseStatus Compiler::optFindLoopsPhase() { optFindLoops(); return PhaseStatus::MODIFIED_EVERYTHING; } /***************************************************************************** * * Determine the kind of interference for the call. 
*/ /* static */ inline Compiler::callInterf Compiler::optCallInterf(GenTreeCall* call) { // if not a helper, kills everything if (call->gtCallType != CT_HELPER) { return CALLINT_ALL; } // setfield and array address store kill all indirections switch (eeGetHelperNum(call->gtCallMethHnd)) { case CORINFO_HELP_ASSIGN_REF: // Not strictly needed as we don't make a GT_CALL with this case CORINFO_HELP_CHECKED_ASSIGN_REF: // Not strictly needed as we don't make a GT_CALL with this case CORINFO_HELP_ASSIGN_BYREF: // Not strictly needed as we don't make a GT_CALL with this case CORINFO_HELP_SETFIELDOBJ: case CORINFO_HELP_ARRADDR_ST: return CALLINT_REF_INDIRS; case CORINFO_HELP_SETFIELDFLOAT: case CORINFO_HELP_SETFIELDDOUBLE: case CORINFO_HELP_SETFIELD8: case CORINFO_HELP_SETFIELD16: case CORINFO_HELP_SETFIELD32: case CORINFO_HELP_SETFIELD64: return CALLINT_SCL_INDIRS; case CORINFO_HELP_ASSIGN_STRUCT: // Not strictly needed as we don't use this case CORINFO_HELP_MEMSET: // Not strictly needed as we don't make a GT_CALL with this case CORINFO_HELP_MEMCPY: // Not strictly needed as we don't make a GT_CALL with this case CORINFO_HELP_SETFIELDSTRUCT: return CALLINT_ALL_INDIRS; default: break; } // other helpers kill nothing return CALLINT_NONE; } /***************************************************************************** * * See if the given tree can be computed in the given precision (which must * be smaller than the type of the tree for this to make sense). If 'doit' * is false, we merely check to see whether narrowing is possible; if we * get called with 'doit' being true, we actually perform the narrowing. */ bool Compiler::optNarrowTree(GenTree* tree, var_types srct, var_types dstt, ValueNumPair vnpNarrow, bool doit) { genTreeOps oper; unsigned kind; noway_assert(tree); noway_assert(genActualType(tree->gtType) == genActualType(srct)); /* Assume we're only handling integer types */ noway_assert(varTypeIsIntegral(srct)); noway_assert(varTypeIsIntegral(dstt)); unsigned srcSize = genTypeSize(srct); unsigned dstSize = genTypeSize(dstt); /* dstt must be smaller than srct to narrow */ if (dstSize >= srcSize) { return false; } /* Figure out what kind of a node we have */ oper = tree->OperGet(); kind = tree->OperKind(); if (oper == GT_ASG) { noway_assert(doit == false); return false; } ValueNumPair NoVNPair = ValueNumPair(); if (kind & GTK_LEAF) { switch (oper) { /* Constants can usually be narrowed by changing their value */ CLANG_FORMAT_COMMENT_ANCHOR; #ifndef TARGET_64BIT __int64 lval; __int64 lmask; case GT_CNS_LNG: lval = tree->AsIntConCommon()->LngValue(); lmask = 0; switch (dstt) { case TYP_BYTE: lmask = 0x0000007F; break; case TYP_BOOL: case TYP_UBYTE: lmask = 0x000000FF; break; case TYP_SHORT: lmask = 0x00007FFF; break; case TYP_USHORT: lmask = 0x0000FFFF; break; case TYP_INT: lmask = 0x7FFFFFFF; break; case TYP_UINT: lmask = 0xFFFFFFFF; break; default: return false; } if ((lval & lmask) != lval) return false; if (doit) { tree->BashToConst(static_cast<int32_t>(lval)); if (vnStore != nullptr) { fgValueNumberTreeConst(tree); } } return true; #endif case GT_CNS_INT: ssize_t ival; ival = tree->AsIntCon()->gtIconVal; ssize_t imask; imask = 0; switch (dstt) { case TYP_BYTE: imask = 0x0000007F; break; case TYP_BOOL: case TYP_UBYTE: imask = 0x000000FF; break; case TYP_SHORT: imask = 0x00007FFF; break; case TYP_USHORT: imask = 0x0000FFFF; break; #ifdef TARGET_64BIT case TYP_INT: imask = 0x7FFFFFFF; break; case TYP_UINT: imask = 0xFFFFFFFF; break; #endif // TARGET_64BIT default: return false; } if ((ival & 
imask) != ival) { return false; } #ifdef TARGET_64BIT if (doit) { tree->gtType = TYP_INT; tree->AsIntCon()->gtIconVal = (int)ival; if (vnStore != nullptr) { fgValueNumberTreeConst(tree); } } #endif // TARGET_64BIT return true; /* Operands that are in memory can usually be narrowed simply by changing their gtType */ case GT_LCL_VAR: /* We only allow narrowing long -> int for a GT_LCL_VAR */ if (dstSize == sizeof(int)) { goto NARROW_IND; } break; case GT_CLS_VAR: case GT_LCL_FLD: goto NARROW_IND; default: break; } noway_assert(doit == false); return false; } if (kind & (GTK_BINOP | GTK_UNOP)) { GenTree* op1; op1 = tree->AsOp()->gtOp1; GenTree* op2; op2 = tree->AsOp()->gtOp2; switch (tree->gtOper) { case GT_AND: noway_assert(genActualType(tree->gtType) == genActualType(op1->gtType)); noway_assert(genActualType(tree->gtType) == genActualType(op2->gtType)); GenTree* opToNarrow; opToNarrow = nullptr; GenTree** otherOpPtr; otherOpPtr = nullptr; bool foundOperandThatBlocksNarrowing; foundOperandThatBlocksNarrowing = false; // If 'dstt' is unsigned and one of the operands can be narrowed into 'dsst', // the result of the GT_AND will also fit into 'dstt' and can be narrowed. // The same is true if one of the operands is an int const and can be narrowed into 'dsst'. if (!gtIsActiveCSE_Candidate(op2) && ((op2->gtOper == GT_CNS_INT) || varTypeIsUnsigned(dstt))) { if (optNarrowTree(op2, srct, dstt, NoVNPair, false)) { opToNarrow = op2; otherOpPtr = &tree->AsOp()->gtOp1; } else { foundOperandThatBlocksNarrowing = true; } } if ((opToNarrow == nullptr) && !gtIsActiveCSE_Candidate(op1) && ((op1->gtOper == GT_CNS_INT) || varTypeIsUnsigned(dstt))) { if (optNarrowTree(op1, srct, dstt, NoVNPair, false)) { opToNarrow = op1; otherOpPtr = &tree->AsOp()->gtOp2; } else { foundOperandThatBlocksNarrowing = true; } } if (opToNarrow != nullptr) { // We will change the type of the tree and narrow opToNarrow // if (doit) { tree->gtType = genActualType(dstt); tree->SetVNs(vnpNarrow); optNarrowTree(opToNarrow, srct, dstt, NoVNPair, true); // We may also need to cast away the upper bits of *otherOpPtr if (srcSize == 8) { assert(tree->gtType == TYP_INT); GenTree* castOp = gtNewCastNode(TYP_INT, *otherOpPtr, false, TYP_INT); #ifdef DEBUG castOp->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif *otherOpPtr = castOp; } } return true; } if (foundOperandThatBlocksNarrowing) { noway_assert(doit == false); return false; } goto COMMON_BINOP; case GT_ADD: case GT_MUL: if (tree->gtOverflow() || varTypeIsSmall(dstt)) { noway_assert(doit == false); return false; } FALLTHROUGH; case GT_OR: case GT_XOR: noway_assert(genActualType(tree->gtType) == genActualType(op1->gtType)); noway_assert(genActualType(tree->gtType) == genActualType(op2->gtType)); COMMON_BINOP: if (gtIsActiveCSE_Candidate(op1) || gtIsActiveCSE_Candidate(op2) || !optNarrowTree(op1, srct, dstt, NoVNPair, doit) || !optNarrowTree(op2, srct, dstt, NoVNPair, doit)) { noway_assert(doit == false); return false; } /* Simply change the type of the tree */ if (doit) { if (tree->gtOper == GT_MUL && (tree->gtFlags & GTF_MUL_64RSLT)) { tree->gtFlags &= ~GTF_MUL_64RSLT; } tree->gtType = genActualType(dstt); tree->SetVNs(vnpNarrow); } return true; case GT_IND: NARROW_IND: if ((dstSize > genTypeSize(tree->gtType)) && (varTypeIsUnsigned(dstt) && !varTypeIsUnsigned(tree->gtType))) { return false; } /* Simply change the type of the tree */ if (doit && (dstSize <= genTypeSize(tree->gtType))) { if (!varTypeIsSmall(dstt)) { dstt = varTypeToSigned(dstt); } tree->gtType = dstt; 
tree->SetVNs(vnpNarrow); /* Make sure we don't mess up the variable type */ if ((oper == GT_LCL_VAR) || (oper == GT_LCL_FLD)) { tree->gtFlags |= GTF_VAR_CAST; } } return true; case GT_EQ: case GT_NE: case GT_LT: case GT_LE: case GT_GT: case GT_GE: /* These can always be narrowed since they only represent 0 or 1 */ return true; case GT_CAST: { var_types cast = tree->CastToType(); var_types oprt = op1->TypeGet(); unsigned oprSize = genTypeSize(oprt); if (cast != srct) { return false; } if (varTypeIsIntegralOrI(dstt) != varTypeIsIntegralOrI(oprt)) { return false; } if (tree->gtOverflow()) { return false; } /* Is this a cast from the type we're narrowing to or a smaller one? */ if (oprSize <= dstSize) { /* Bash the target type of the cast */ if (doit) { if (!varTypeIsSmall(dstt)) { dstt = varTypeToSigned(dstt); } if ((oprSize == dstSize) && ((varTypeIsUnsigned(dstt) == varTypeIsUnsigned(oprt)) || !varTypeIsSmall(dstt))) { // Same size and there is no signedness mismatch for small types: change the CAST // into a NOP JITDUMP("Cast operation has no effect, bashing [%06d] GT_CAST into a GT_NOP.\n", dspTreeID(tree)); tree->ChangeOper(GT_NOP); tree->gtType = dstt; // Clear the GTF_UNSIGNED flag, as it may have been set on the cast node tree->gtFlags &= ~GTF_UNSIGNED; tree->AsOp()->gtOp2 = nullptr; tree->gtVNPair = op1->gtVNPair; // Set to op1's ValueNumber } else { // oprSize is smaller or there is a signedness mismatch for small types // Change the CastToType in the GT_CAST node tree->CastToType() = dstt; // The result type of a GT_CAST is never a small type. // Use genActualType to widen dstt when it is a small types. tree->gtType = genActualType(dstt); tree->SetVNs(vnpNarrow); } } return true; } } return false; case GT_COMMA: if (!gtIsActiveCSE_Candidate(op2) && optNarrowTree(op2, srct, dstt, vnpNarrow, doit)) { /* Simply change the type of the tree */ if (doit) { tree->gtType = genActualType(dstt); tree->SetVNs(vnpNarrow); } return true; } return false; default: noway_assert(doit == false); return false; } } return false; } /***************************************************************************** * * The following logic figures out whether the given variable is assigned * somewhere in a list of basic blocks (or in an entire loop). */ Compiler::fgWalkResult Compiler::optIsVarAssgCB(GenTree** pTree, fgWalkData* data) { GenTree* tree = *pTree; if (tree->OperIsSsaDef()) { isVarAssgDsc* desc = (isVarAssgDsc*)data->pCallbackData; assert(desc && desc->ivaSelf == desc); GenTree* dest = nullptr; if (tree->OperIs(GT_CALL)) { desc->ivaMaskCall = optCallInterf(tree->AsCall()); dest = tree->AsCall()->GetLclRetBufArgNode(); if (dest == nullptr) { return WALK_CONTINUE; } dest = dest->AsOp()->gtOp1; } else { dest = tree->AsOp()->gtOp1; } genTreeOps destOper = dest->OperGet(); if (destOper == GT_LCL_VAR) { unsigned tvar = dest->AsLclVarCommon()->GetLclNum(); if (tvar < lclMAX_ALLSET_TRACKED) { AllVarSetOps::AddElemD(data->compiler, desc->ivaMaskVal, tvar); } else { desc->ivaMaskIncomplete = true; } if (tvar == desc->ivaVar) { if (tree != desc->ivaSkip) { return WALK_ABORT; } } } else if (destOper == GT_LCL_FLD) { /* We can't track every field of every var. Moreover, indirections may access different parts of the var as different (but overlapping) fields. So just treat them as indirect accesses */ // unsigned lclNum = dest->AsLclFld()->GetLclNum(); // noway_assert(lvaTable[lclNum].lvAddrTaken); varRefKinds refs = varTypeIsGC(tree->TypeGet()) ? 
VR_IND_REF : VR_IND_SCL; desc->ivaMaskInd = varRefKinds(desc->ivaMaskInd | refs); } else if (destOper == GT_CLS_VAR) { desc->ivaMaskInd = varRefKinds(desc->ivaMaskInd | VR_GLB_VAR); } else if (destOper == GT_IND) { /* Set the proper indirection bits */ varRefKinds refs = varTypeIsGC(tree->TypeGet()) ? VR_IND_REF : VR_IND_SCL; desc->ivaMaskInd = varRefKinds(desc->ivaMaskInd | refs); } } return WALK_CONTINUE; } /*****************************************************************************/ bool Compiler::optIsVarAssigned(BasicBlock* beg, BasicBlock* end, GenTree* skip, unsigned var) { bool result; isVarAssgDsc desc; desc.ivaSkip = skip; #ifdef DEBUG desc.ivaSelf = &desc; #endif desc.ivaVar = var; desc.ivaMaskCall = CALLINT_NONE; AllVarSetOps::AssignNoCopy(this, desc.ivaMaskVal, AllVarSetOps::MakeEmpty(this)); for (;;) { noway_assert(beg != nullptr); for (Statement* const stmt : beg->Statements()) { if (fgWalkTreePre(stmt->GetRootNodePointer(), optIsVarAssgCB, &desc) != WALK_CONTINUE) { result = true; goto DONE; } } if (beg == end) { break; } beg = beg->bbNext; } result = false; DONE: return result; } /***************************************************************************** * Is "var" assigned in the loop "lnum" ? */ bool Compiler::optIsVarAssgLoop(unsigned lnum, unsigned var) { assert(lnum < optLoopCount); if (var < lclMAX_ALLSET_TRACKED) { ALLVARSET_TP vs(AllVarSetOps::MakeSingleton(this, var)); return optIsSetAssgLoop(lnum, vs) != 0; } else { return optIsVarAssigned(optLoopTable[lnum].lpHead->bbNext, optLoopTable[lnum].lpBottom, nullptr, var); } } /*****************************************************************************/ int Compiler::optIsSetAssgLoop(unsigned lnum, ALLVARSET_VALARG_TP vars, varRefKinds inds) { noway_assert(lnum < optLoopCount); LoopDsc* loop = &optLoopTable[lnum]; /* Do we already know what variables are assigned within this loop? */ if (!(loop->lpFlags & LPFLG_ASGVARS_YES)) { isVarAssgDsc desc; /* Prepare the descriptor used by the tree walker call-back */ desc.ivaVar = (unsigned)-1; desc.ivaSkip = nullptr; #ifdef DEBUG desc.ivaSelf = &desc; #endif AllVarSetOps::AssignNoCopy(this, desc.ivaMaskVal, AllVarSetOps::MakeEmpty(this)); desc.ivaMaskInd = VR_NONE; desc.ivaMaskCall = CALLINT_NONE; desc.ivaMaskIncomplete = false; /* Now walk all the statements of the loop */ for (BasicBlock* const block : loop->LoopBlocks()) { for (Statement* const stmt : block->NonPhiStatements()) { fgWalkTreePre(stmt->GetRootNodePointer(), optIsVarAssgCB, &desc); if (desc.ivaMaskIncomplete) { loop->lpFlags |= LPFLG_ASGVARS_INC; } } } AllVarSetOps::Assign(this, loop->lpAsgVars, desc.ivaMaskVal); loop->lpAsgInds = desc.ivaMaskInd; loop->lpAsgCall = desc.ivaMaskCall; /* Now we know what variables are assigned in the loop */ loop->lpFlags |= LPFLG_ASGVARS_YES; } /* Now we can finally test the caller's mask against the loop's */ if (!AllVarSetOps::IsEmptyIntersection(this, loop->lpAsgVars, vars) || (loop->lpAsgInds & inds)) { return 1; } switch (loop->lpAsgCall) { case CALLINT_ALL: /* Can't hoist if the call might have side effect on an indirection. */ if (loop->lpAsgInds != VR_NONE) { return 1; } break; case CALLINT_REF_INDIRS: /* Can't hoist if the call might have side effect on an ref indirection. */ if (loop->lpAsgInds & VR_IND_REF) { return 1; } break; case CALLINT_SCL_INDIRS: /* Can't hoist if the call might have side effect on an non-ref indirection. 
*/ if (loop->lpAsgInds & VR_IND_SCL) { return 1; } break; case CALLINT_ALL_INDIRS: /* Can't hoist if the call might have side effect on any indirection. */ if (loop->lpAsgInds & (VR_IND_REF | VR_IND_SCL)) { return 1; } break; case CALLINT_NONE: /* Other helpers kill nothing */ break; default: noway_assert(!"Unexpected lpAsgCall value"); } return 0; } void Compiler::optPerformHoistExpr(GenTree* origExpr, BasicBlock* exprBb, unsigned lnum) { assert(exprBb != nullptr); #ifdef DEBUG if (verbose) { printf("\nHoisting a copy of "); printTreeID(origExpr); printf(" from " FMT_BB " into PreHeader " FMT_BB " for loop " FMT_LP " <" FMT_BB ".." FMT_BB ">:\n", exprBb->bbNum, optLoopTable[lnum].lpHead->bbNum, lnum, optLoopTable[lnum].lpTop->bbNum, optLoopTable[lnum].lpBottom->bbNum); gtDispTree(origExpr); printf("\n"); } #endif // Create a copy of the expression and mark it for CSE's. GenTree* hoistExpr = gtCloneExpr(origExpr, GTF_MAKE_CSE); // The hoist Expr does not have to computed into a specific register, // so clear the RegNum if it was set in the original expression hoistExpr->ClearRegNum(); // Copy any loop memory dependence. optCopyLoopMemoryDependence(origExpr, hoistExpr); // At this point we should have a cloned expression, marked with the GTF_MAKE_CSE flag assert(hoistExpr != origExpr); assert(hoistExpr->gtFlags & GTF_MAKE_CSE); GenTree* hoist = hoistExpr; // The value of the expression isn't used (unless it's an assignment). if (hoistExpr->OperGet() != GT_ASG) { hoist = gtUnusedValNode(hoistExpr); } /* Put the statement in the preheader */ INDEBUG(optLoopTable[lnum].lpValidatePreHeader()); BasicBlock* preHead = optLoopTable[lnum].lpHead; // fgMorphTree requires that compCurBB be the block that contains // (or in this case, will contain) the expression. compCurBB = preHead; hoist = fgMorphTree(hoist); preHead->bbFlags |= (exprBb->bbFlags & (BBF_HAS_IDX_LEN | BBF_HAS_NULLCHECK)); Statement* hoistStmt = gtNewStmt(hoist); // Simply append the statement at the end of the preHead's list. Statement* firstStmt = preHead->firstStmt(); if (firstStmt != nullptr) { /* append after last statement */ Statement* lastStmt = preHead->lastStmt(); assert(lastStmt->GetNextStmt() == nullptr); lastStmt->SetNextStmt(hoistStmt); hoistStmt->SetPrevStmt(lastStmt); firstStmt->SetPrevStmt(hoistStmt); } else { /* Empty pre-header - store the single statement in the block */ preHead->bbStmtList = hoistStmt; hoistStmt->SetPrevStmt(hoistStmt); } hoistStmt->SetNextStmt(nullptr); #ifdef DEBUG if (verbose) { printf("This hoisted copy placed in PreHeader (" FMT_BB "):\n", preHead->bbNum); gtDispTree(hoist); printf("\n"); } #endif if (fgStmtListThreaded) { gtSetStmtInfo(hoistStmt); fgSetStmtSeq(hoistStmt); } #ifdef DEBUG if (m_nodeTestData != nullptr) { // What is the depth of the loop "lnum"? 
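        // (A top-level loop has depth 0 here, a loop nested directly inside it has depth 1, and
        //  so on; this is the value that the TL_LoopHoist annotation's number is checked against
        //  below.)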
ssize_t depth = 0; unsigned lnumIter = lnum; while (optLoopTable[lnumIter].lpParent != BasicBlock::NOT_IN_LOOP) { depth++; lnumIter = optLoopTable[lnumIter].lpParent; } NodeToTestDataMap* testData = GetNodeTestData(); TestLabelAndNum tlAndN; if (testData->Lookup(origExpr, &tlAndN) && tlAndN.m_tl == TL_LoopHoist) { if (tlAndN.m_num == -1) { printf("Node "); printTreeID(origExpr); printf(" was declared 'do not hoist', but is being hoisted.\n"); assert(false); } else if (tlAndN.m_num != depth) { printf("Node "); printTreeID(origExpr); printf(" was declared as hoistable from loop at nesting depth %d; actually hoisted from loop at depth " "%d.\n", tlAndN.m_num, depth); assert(false); } else { // We've correctly hoisted this, so remove the annotation. Later, we'll check for any remaining "must // hoist" annotations. testData->Remove(origExpr); // Now we insert an annotation to make sure that "hoistExpr" is actually CSE'd. tlAndN.m_tl = TL_CSE_Def; tlAndN.m_num = m_loopHoistCSEClass++; testData->Set(hoistExpr, tlAndN); } } } #endif #if LOOP_HOIST_STATS if (!m_curLoopHasHoistedExpression) { m_loopsWithHoistedExpressions++; m_curLoopHasHoistedExpression = true; } m_totalHoistedExpressions++; #endif // LOOP_HOIST_STATS } void Compiler::optHoistLoopCode() { // If we don't have any loops in the method then take an early out now. if (optLoopCount == 0) { JITDUMP("\nNo loops; no hoisting\n"); return; } #ifdef DEBUG unsigned jitNoHoist = JitConfig.JitNoHoist(); if (jitNoHoist > 0) { JITDUMP("\nJitNoHoist set; no hoisting\n"); return; } #endif #if 0 // The code in this #if has been useful in debugging loop hoisting issues, by // enabling selective enablement of the loop hoisting optimization according to // method hash. #ifdef DEBUG unsigned methHash = info.compMethodHash(); char* lostr = getenv("loophoisthashlo"); unsigned methHashLo = 0; if (lostr != NULL) { sscanf_s(lostr, "%x", &methHashLo); // methHashLo = (unsigned(atoi(lostr)) << 2); // So we don't have to use negative numbers. } char* histr = getenv("loophoisthashhi"); unsigned methHashHi = UINT32_MAX; if (histr != NULL) { sscanf_s(histr, "%x", &methHashHi); // methHashHi = (unsigned(atoi(histr)) << 2); // So we don't have to use negative numbers. } if (methHash < methHashLo || methHash > methHashHi) return; printf("Doing loop hoisting in %s (0x%x).\n", info.compFullName, methHash); #endif // DEBUG #endif // 0 -- debugging loop hoisting issues #ifdef DEBUG if (verbose) { printf("\n*************** In optHoistLoopCode()\n"); printf("Blocks/Trees before phase\n"); fgDispBasicBlocks(true); fgDispHandlerTab(); optPrintLoopTable(); } #endif // Consider all the loop nests, in outer-to-inner order (thus hoisting expressions outside the largest loop in which // they are invariant.) LoopHoistContext hoistCtxt(this); for (unsigned lnum = 0; lnum < optLoopCount; lnum++) { if (optLoopTable[lnum].lpFlags & LPFLG_REMOVED) { JITDUMP("\nLoop " FMT_LP " was removed\n", lnum); continue; } if (optLoopTable[lnum].lpParent == BasicBlock::NOT_IN_LOOP) { optHoistLoopNest(lnum, &hoistCtxt); } } #if DEBUG if (fgModified) { if (verbose) { printf("Blocks/Trees after optHoistLoopCode() modified flowgraph\n"); fgDispBasicBlocks(true); printf(""); } // Make sure that the predecessor lists are accurate fgDebugCheckBBlist(); } #endif #ifdef DEBUG // Test Data stuff.. // If we have no test data, early out. 
if (m_nodeTestData == nullptr) { return; } NodeToTestDataMap* testData = GetNodeTestData(); for (NodeToTestDataMap::KeyIterator ki = testData->Begin(); !ki.Equal(testData->End()); ++ki) { TestLabelAndNum tlAndN; GenTree* node = ki.Get(); bool b = testData->Lookup(node, &tlAndN); assert(b); if (tlAndN.m_tl != TL_LoopHoist) { continue; } // Otherwise, it is a loop hoist annotation. assert(tlAndN.m_num < 100); // >= 100 indicates nested static field address, should already have been moved. if (tlAndN.m_num >= 0) { printf("Node "); printTreeID(node); printf(" was declared 'must hoist', but has not been hoisted.\n"); assert(false); } } #endif // DEBUG } void Compiler::optHoistLoopNest(unsigned lnum, LoopHoistContext* hoistCtxt) { // Do this loop, then recursively do all nested loops. JITDUMP("\n%s " FMT_LP "\n", optLoopTable[lnum].lpParent == BasicBlock::NOT_IN_LOOP ? "Loop Nest" : "Nested Loop", lnum); #if LOOP_HOIST_STATS // Record stats m_curLoopHasHoistedExpression = false; m_loopsConsidered++; #endif // LOOP_HOIST_STATS optHoistThisLoop(lnum, hoistCtxt); VNSet* hoistedInCurLoop = hoistCtxt->ExtractHoistedInCurLoop(); if (optLoopTable[lnum].lpChild != BasicBlock::NOT_IN_LOOP) { // Add the ones hoisted in "lnum" to "hoistedInParents" for any nested loops. // TODO-Cleanup: we should have a set abstraction for loops. if (hoistedInCurLoop != nullptr) { for (VNSet::KeyIterator keys = hoistedInCurLoop->Begin(); !keys.Equal(hoistedInCurLoop->End()); ++keys) { #ifdef DEBUG bool b; assert(!hoistCtxt->m_hoistedInParentLoops.Lookup(keys.Get(), &b)); #endif hoistCtxt->m_hoistedInParentLoops.Set(keys.Get(), true); } } for (unsigned child = optLoopTable[lnum].lpChild; child != BasicBlock::NOT_IN_LOOP; child = optLoopTable[child].lpSibling) { optHoistLoopNest(child, hoistCtxt); } // Now remove them. // TODO-Cleanup: we should have a set abstraction for loops. if (hoistedInCurLoop != nullptr) { for (VNSet::KeyIterator keys = hoistedInCurLoop->Begin(); !keys.Equal(hoistedInCurLoop->End()); ++keys) { // Note that we asserted when we added these that they hadn't been members, so removing is appropriate. hoistCtxt->m_hoistedInParentLoops.Remove(keys.Get()); } } } } void Compiler::optHoistThisLoop(unsigned lnum, LoopHoistContext* hoistCtxt) { LoopDsc* pLoopDsc = &optLoopTable[lnum]; /* If loop was removed continue */ if (pLoopDsc->lpFlags & LPFLG_REMOVED) { JITDUMP(" ... not hoisting " FMT_LP ": removed\n", lnum); return; } // Ensure the per-loop sets/tables are empty. hoistCtxt->m_curLoopVnInvariantCache.RemoveAll(); #ifdef DEBUG if (verbose) { printf("optHoistThisLoop for loop " FMT_LP " <" FMT_BB ".." FMT_BB ">:\n", lnum, pLoopDsc->lpTop->bbNum, pLoopDsc->lpBottom->bbNum); printf(" Loop body %s a call\n", (pLoopDsc->lpFlags & LPFLG_CONTAINS_CALL) ? "contains" : "does not contain"); printf(" Loop has %s\n", (pLoopDsc->lpExitCnt == 1) ? "single exit" : "multiple exits"); } #endif VARSET_TP loopVars(VarSetOps::Intersection(this, pLoopDsc->lpVarInOut, pLoopDsc->lpVarUseDef)); pLoopDsc->lpVarInOutCount = VarSetOps::Count(this, pLoopDsc->lpVarInOut); pLoopDsc->lpLoopVarCount = VarSetOps::Count(this, loopVars); pLoopDsc->lpHoistedExprCount = 0; #ifndef TARGET_64BIT unsigned longVarsCount = VarSetOps::Count(this, lvaLongVars); if (longVarsCount > 0) { // Since 64-bit variables take up two registers on 32-bit targets, we increase // the Counts such that each TYP_LONG variable counts twice. 
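        // (Illustrative: on a 32-bit target, a loop with three TYP_INT in-out variables and two
        //  TYP_LONG in-out variables would report lpVarInOutCount = 3 + 2 * 2 = 7, reflecting
        //  that each long occupies a register pair.)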
// VARSET_TP loopLongVars(VarSetOps::Intersection(this, loopVars, lvaLongVars)); VARSET_TP inOutLongVars(VarSetOps::Intersection(this, pLoopDsc->lpVarInOut, lvaLongVars)); #ifdef DEBUG if (verbose) { printf("\n LONGVARS(%d)=", VarSetOps::Count(this, lvaLongVars)); lvaDispVarSet(lvaLongVars); } #endif pLoopDsc->lpLoopVarCount += VarSetOps::Count(this, loopLongVars); pLoopDsc->lpVarInOutCount += VarSetOps::Count(this, inOutLongVars); } #endif // !TARGET_64BIT #ifdef DEBUG if (verbose) { printf("\n USEDEF (%d)=", VarSetOps::Count(this, pLoopDsc->lpVarUseDef)); lvaDispVarSet(pLoopDsc->lpVarUseDef); printf("\n INOUT (%d)=", pLoopDsc->lpVarInOutCount); lvaDispVarSet(pLoopDsc->lpVarInOut); printf("\n LOOPVARS(%d)=", pLoopDsc->lpLoopVarCount); lvaDispVarSet(loopVars); printf("\n"); } #endif unsigned floatVarsCount = VarSetOps::Count(this, lvaFloatVars); if (floatVarsCount > 0) { VARSET_TP loopFPVars(VarSetOps::Intersection(this, loopVars, lvaFloatVars)); VARSET_TP inOutFPVars(VarSetOps::Intersection(this, pLoopDsc->lpVarInOut, lvaFloatVars)); pLoopDsc->lpLoopVarFPCount = VarSetOps::Count(this, loopFPVars); pLoopDsc->lpVarInOutFPCount = VarSetOps::Count(this, inOutFPVars); pLoopDsc->lpHoistedFPExprCount = 0; pLoopDsc->lpLoopVarCount -= pLoopDsc->lpLoopVarFPCount; pLoopDsc->lpVarInOutCount -= pLoopDsc->lpVarInOutFPCount; #ifdef DEBUG if (verbose) { printf(" INOUT-FP(%d)=", pLoopDsc->lpVarInOutFPCount); lvaDispVarSet(inOutFPVars); printf("\n LOOPV-FP(%d)=", pLoopDsc->lpLoopVarFPCount); lvaDispVarSet(loopFPVars); printf("\n"); } #endif } else // (floatVarsCount == 0) { pLoopDsc->lpLoopVarFPCount = 0; pLoopDsc->lpVarInOutFPCount = 0; pLoopDsc->lpHoistedFPExprCount = 0; } // Find the set of definitely-executed blocks. // Ideally, the definitely-executed blocks are the ones that post-dominate the entry block. // Until we have post-dominators, we'll special-case for single-exit blocks. // // Todo: it is not clear if this is a correctness requirement or a profitability heuristic. // It seems like the latter. Ideally have enough safeguards to prevent hoisting exception // or side-effect dependent things. // // We really should consider hoisting from conditionally executed blocks, if they are frequently executed // and it is safe to evaluate the tree early. // // In particular if we have a loop nest, when scanning the outer loop we should consider hoisting from blocks // in enclosed loops. However, this is likely to scale poorly, and we really should instead start // hoisting inner to outer. // ArrayStack<BasicBlock*> defExec(getAllocatorLoopHoist()); if (pLoopDsc->lpExitCnt == 1) { assert(pLoopDsc->lpExit != nullptr); JITDUMP(" Only considering hoisting in blocks that dominate exit block " FMT_BB "\n", pLoopDsc->lpExit->bbNum); BasicBlock* cur = pLoopDsc->lpExit; // Push dominators, until we reach "entry" or exit the loop. while (cur != nullptr && pLoopDsc->lpContains(cur) && cur != pLoopDsc->lpEntry) { defExec.Push(cur); cur = cur->bbIDom; } // If we didn't reach the entry block, give up and *just* push the entry block. if (cur != pLoopDsc->lpEntry) { JITDUMP(" -- odd, we didn't reach entry from exit via dominators. Only considering hoisting in entry " "block " FMT_BB "\n", pLoopDsc->lpEntry->bbNum); defExec.Reset(); } defExec.Push(pLoopDsc->lpEntry); } else // More than one exit { JITDUMP(" only considering hoisting in entry block " FMT_BB "\n", pLoopDsc->lpEntry->bbNum); // We'll assume that only the entry block is definitely executed. // We could in the future do better. 
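        // (For example, a loop whose body contains a conditional `break` has two exits; in that
        //  case only expressions in the entry block are candidates, even if some later block
        //  happens to execute on every iteration.)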
        defExec.Push(pLoopDsc->lpEntry);
    }

    optHoistLoopBlocks(lnum, &defExec, hoistCtxt);
}

bool Compiler::optIsProfitableToHoistTree(GenTree* tree, unsigned lnum)
{
    LoopDsc* pLoopDsc = &optLoopTable[lnum];

    bool loopContainsCall = (pLoopDsc->lpFlags & LPFLG_CONTAINS_CALL) != 0;

    int availRegCount;
    int hoistedExprCount;
    int loopVarCount;
    int varInOutCount;

    if (varTypeIsFloating(tree))
    {
        hoistedExprCount = pLoopDsc->lpHoistedFPExprCount;
        loopVarCount     = pLoopDsc->lpLoopVarFPCount;
        varInOutCount    = pLoopDsc->lpVarInOutFPCount;

        availRegCount = CNT_CALLEE_SAVED_FLOAT;
        if (!loopContainsCall)
        {
            availRegCount += CNT_CALLEE_TRASH_FLOAT - 1;
        }
#ifdef TARGET_ARM
        // For ARM each double takes two FP registers
        // For now on ARM we won't track singles/doubles
        // and instead just assume that we always have doubles.
        //
        availRegCount /= 2;
#endif
    }
    else
    {
        hoistedExprCount = pLoopDsc->lpHoistedExprCount;
        loopVarCount     = pLoopDsc->lpLoopVarCount;
        varInOutCount    = pLoopDsc->lpVarInOutCount;

        availRegCount = CNT_CALLEE_SAVED - 1;
        if (!loopContainsCall)
        {
            availRegCount += CNT_CALLEE_TRASH - 1;
        }
#ifndef TARGET_64BIT
        // For our 32-bit targets Long types take two registers.
        if (varTypeIsLong(tree->TypeGet()))
        {
            availRegCount = (availRegCount + 1) / 2;
        }
#endif
    }

    // decrement the availRegCount by the count of expressions that we have already hoisted.
    availRegCount -= hoistedExprCount;

    // the variables that are read/written inside the loop should
    // always be a subset of the InOut variables for the loop
    assert(loopVarCount <= varInOutCount);

    // When loopVarCount >= availRegCount we believe that all of the
    // available registers will get used to hold LclVars inside the loop.
    // This pessimistically assumes that each loopVar has a conflicting
    // lifetime with every other loopVar.
    // For this case we will hoist the expression only if it is profitable
    // to place it in a stack home location (GetCostEx() >= 2*IND_COST_EX)
    // as we believe it will be placed in the stack or one of the other
    // loopVars will be spilled into the stack
    //
    if (loopVarCount >= availRegCount)
    {
        // Don't hoist expressions that are not heavy: tree->GetCostEx() < (2*IND_COST_EX)
        if (tree->GetCostEx() < (2 * IND_COST_EX))
        {
            JITDUMP(" tree cost too low: %d < %d (loopVarCount %u >= availableRegCount %u)\n", tree->GetCostEx(),
                    2 * IND_COST_EX, loopVarCount, availRegCount);
            return false;
        }
    }

    // When varInOutCount < availRegCount we know that there are
    // some available register(s) when we enter the loop body.
    // When varInOutCount == availRegCount there often will be a register
    // available when we enter the loop body, since a loop often defines a
    // LclVar on exit or there is often at least one LclVar that is worth
    // spilling to the stack to make way for this hoisted expression.
    // So we are willing to hoist an expression with GetCostEx() == MIN_CSE_COST
    //
    if (varInOutCount > availRegCount)
    {
        // Don't hoist expressions that barely meet CSE cost requirements: tree->GetCostEx() == MIN_CSE_COST
        if (tree->GetCostEx() <= MIN_CSE_COST + 1)
        {
            JITDUMP(" tree not good CSE: %d <= %d (varInOutCount %u > availableRegCount %u)\n", tree->GetCostEx(),
                    2 * MIN_CSE_COST + 1, varInOutCount, availRegCount)
            return false;
        }
    }

    return true;
}

//------------------------------------------------------------------------
// optRecordLoopMemoryDependence: record that tree's value number
//   is dependent on a particular memory VN
//
// Arguments:
//   tree -- tree in question
//   block -- block containing tree
//   memoryVN -- VN for a "map" from a select operation encountered
//     while computing the tree's VN
//
// Notes:
//   Only tracks trees in loops, and memory updates in the same loop nest.
//   So this is a coarse-grained dependence that is only usable for
//   hoisting tree out of its enclosing loops.
//
void Compiler::optRecordLoopMemoryDependence(GenTree* tree, BasicBlock* block, ValueNum memoryVN)
{
    // If tree is not in a loop, we don't need to track its loop dependence.
    //
    unsigned const loopNum = block->bbNatLoopNum;

    if (loopNum == BasicBlock::NOT_IN_LOOP)
    {
        return;
    }

    // Find the loop associated with this memory VN.
    //
    unsigned updateLoopNum = vnStore->LoopOfVN(memoryVN);

    if (updateLoopNum >= BasicBlock::MAX_LOOP_NUM)
    {
        // There should be only two special non-loop loop nums.
        //
        assert((updateLoopNum == BasicBlock::MAX_LOOP_NUM) || (updateLoopNum == BasicBlock::NOT_IN_LOOP));

        // memoryVN defined outside of any loop, we can ignore.
        //
        JITDUMP(" ==> Not updating loop memory dependence of [%06u], memory " FMT_VN " not defined in a loop\n",
                dspTreeID(tree), memoryVN);
        return;
    }

    // If the loop was removed, then record the dependence in the nearest enclosing loop, if any.
    //
    while ((optLoopTable[updateLoopNum].lpFlags & LPFLG_REMOVED) != 0)
    {
        unsigned const updateParentLoopNum = optLoopTable[updateLoopNum].lpParent;

        if (updateParentLoopNum == BasicBlock::NOT_IN_LOOP)
        {
            // Memory VN was defined in a loop, but no longer.
            //
            JITDUMP(" ==> Not updating loop memory dependence of [%06u], memory " FMT_VN
                    " no longer defined in a loop\n",
                    dspTreeID(tree), memoryVN);
            break;
        }

        JITDUMP(" ==> " FMT_LP " removed, updating dependence to parent " FMT_LP "\n", updateLoopNum,
                updateParentLoopNum);
        updateLoopNum = updateParentLoopNum;
    }

    // If the update block is not the header of a loop containing
    // block, we can also ignore the update.
    //
    if (!optLoopContains(updateLoopNum, loopNum))
    {
        JITDUMP(" ==> Not updating loop memory dependence of [%06u]/" FMT_LP ", memory " FMT_VN "/" FMT_LP
                " is not defined in an enclosing loop\n",
                dspTreeID(tree), loopNum, memoryVN, updateLoopNum);
        return;
    }

    // If we have already recorded a loop entry block for this
    // tree, see if the new update is for a more closely nested
    // loop.
    //
    NodeToLoopMemoryBlockMap* const map      = GetNodeToLoopMemoryBlockMap();
    BasicBlock*                     mapBlock = nullptr;

    if (map->Lookup(tree, &mapBlock))
    {
        unsigned const mapLoopNum = mapBlock->bbNatLoopNum;

        // If the update loop contains the existing map loop,
        // the existing map loop is more constraining. So no
        // update needed.
// if (optLoopContains(updateLoopNum, mapLoopNum)) { JITDUMP(" ==> Not updating loop memory dependence of [%06u]; alrady constrained to " FMT_LP " nested in " FMT_LP "\n", dspTreeID(tree), mapLoopNum, updateLoopNum); return; } } // MemoryVN now describes the most constraining loop memory dependence // we know of. Update the map. // JITDUMP(" ==> Updating loop memory dependence of [%06u] to " FMT_LP "\n", dspTreeID(tree), updateLoopNum); map->Set(tree, optLoopTable[updateLoopNum].lpEntry, NodeToLoopMemoryBlockMap::Overwrite); } //------------------------------------------------------------------------ // optCopyLoopMemoryDependence: record that tree's loop memory dependence // is the same as some other tree. // // Arguments: // fromTree -- tree to copy dependence from // toTree -- tree in question // void Compiler::optCopyLoopMemoryDependence(GenTree* fromTree, GenTree* toTree) { NodeToLoopMemoryBlockMap* const map = GetNodeToLoopMemoryBlockMap(); BasicBlock* mapBlock = nullptr; if (map->Lookup(fromTree, &mapBlock)) { map->Set(toTree, mapBlock); } } //------------------------------------------------------------------------ // optHoistLoopBlocks: Hoist invariant expression out of the loop. // // Arguments: // loopNum - The number of the loop // blocks - A stack of blocks belonging to the loop // hoistContext - The loop hoist context // // Assumptions: // The `blocks` stack contains the definitely-executed blocks in // the loop, in the execution order, starting with the loop entry // block on top of the stack. // void Compiler::optHoistLoopBlocks(unsigned loopNum, ArrayStack<BasicBlock*>* blocks, LoopHoistContext* hoistContext) { class HoistVisitor : public GenTreeVisitor<HoistVisitor> { class Value { GenTree* m_node; public: bool m_hoistable; bool m_cctorDependent; bool m_invariant; #ifdef DEBUG const char* m_failReason; #endif Value(GenTree* node) : m_node(node), m_hoistable(false), m_cctorDependent(false), m_invariant(false) { #ifdef DEBUG m_failReason = "unset"; #endif } GenTree* Node() { return m_node; } }; ArrayStack<Value> m_valueStack; bool m_beforeSideEffect; unsigned m_loopNum; LoopHoistContext* m_hoistContext; BasicBlock* m_currentBlock; bool IsNodeHoistable(GenTree* node) { // TODO-CQ: This is a more restrictive version of a check that optIsCSEcandidate already does - it allows // a struct typed node if a class handle can be recovered from it. if (node->TypeGet() == TYP_STRUCT) { return false; } // Tree must be a suitable CSE candidate for us to be able to hoist it. return m_compiler->optIsCSEcandidate(node); } bool IsTreeVNInvariant(GenTree* tree) { ValueNum vn = tree->gtVNPair.GetLiberal(); bool vnIsInvariant = m_compiler->optVNIsLoopInvariant(vn, m_loopNum, &m_hoistContext->m_curLoopVnInvariantCache); // Even though VN is invariant in the loop (say a constant) its value may depend on position // of tree, so for loop hoisting we must also check that any memory read by tree // is also invariant in the loop. // if (vnIsInvariant) { vnIsInvariant = IsTreeLoopMemoryInvariant(tree); } return vnIsInvariant; } //------------------------------------------------------------------------ // IsTreeLoopMemoryInvariant: determine if the value number of tree // is dependent on the tree being executed within the current loop // // Arguments: // tree -- tree in question // // Returns: // true if tree could be evaluated just before loop and get the // same value. // // Note: // Calls are optimistically assumed to be invariant. // Caller must do their own analysis for these tree types. 
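        //   As a rough sketch of the check below: for a non-call tree, we look up the loop
        //   entry block previously recorded for it and, for each memory kind, treat the tree
        //   as variant if that block's incoming memory VN is itself defined inside the
        //   current loop.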
// bool IsTreeLoopMemoryInvariant(GenTree* tree) { if (tree->IsCall()) { // Calls are handled specially by hoisting, and loop memory dependence // must be checked by other means. // return true; } NodeToLoopMemoryBlockMap* const map = m_compiler->GetNodeToLoopMemoryBlockMap(); BasicBlock* loopEntryBlock = nullptr; if (map->Lookup(tree, &loopEntryBlock)) { for (MemoryKind memoryKind : allMemoryKinds()) { ValueNum loopMemoryVN = m_compiler->GetMemoryPerSsaData(loopEntryBlock->bbMemorySsaNumIn[memoryKind]) ->m_vnPair.GetLiberal(); if (!m_compiler->optVNIsLoopInvariant(loopMemoryVN, m_loopNum, &m_hoistContext->m_curLoopVnInvariantCache)) { return false; } } } return true; } public: enum { ComputeStack = false, DoPreOrder = true, DoPostOrder = true, DoLclVarsOnly = false, UseExecutionOrder = true, }; HoistVisitor(Compiler* compiler, unsigned loopNum, LoopHoistContext* hoistContext) : GenTreeVisitor(compiler) , m_valueStack(compiler->getAllocator(CMK_LoopHoist)) , m_beforeSideEffect(true) , m_loopNum(loopNum) , m_hoistContext(hoistContext) , m_currentBlock(nullptr) { } void HoistBlock(BasicBlock* block) { m_currentBlock = block; for (Statement* const stmt : block->NonPhiStatements()) { WalkTree(stmt->GetRootNodePointer(), nullptr); Value& top = m_valueStack.TopRef(); assert(top.Node() == stmt->GetRootNode()); if (top.m_hoistable) { m_compiler->optHoistCandidate(stmt->GetRootNode(), block, m_loopNum, m_hoistContext); } else { JITDUMP(" [%06u] not %s: %s\n", dspTreeID(top.Node()), top.m_invariant ? "invariant" : "hoistable", top.m_failReason); } m_valueStack.Reset(); } // Only unconditionally executed blocks in the loop are visited (see optHoistThisLoop) // so after we're done visiting the first block we need to assume the worst, that the // blocks that are not visisted have side effects. m_beforeSideEffect = false; } fgWalkResult PreOrderVisit(GenTree** use, GenTree* user) { GenTree* node = *use; m_valueStack.Emplace(node); return fgWalkResult::WALK_CONTINUE; } fgWalkResult PostOrderVisit(GenTree** use, GenTree* user) { GenTree* tree = *use; if (tree->OperIsLocal()) { GenTreeLclVarCommon* lclVar = tree->AsLclVarCommon(); unsigned lclNum = lclVar->GetLclNum(); // To be invariant a LclVar node must not be the LHS of an assignment ... bool isInvariant = !user->OperIs(GT_ASG) || (user->AsOp()->gtGetOp1() != tree); // and the variable must be in SSA ... isInvariant = isInvariant && m_compiler->lvaInSsa(lclNum) && lclVar->HasSsaName(); // and the SSA definition must be outside the loop we're hoisting from ... isInvariant = isInvariant && !m_compiler->optLoopTable[m_loopNum].lpContains( m_compiler->lvaGetDesc(lclNum)->GetPerSsaData(lclVar->GetSsaNum())->GetBlock()); // and the VN of the tree is considered invariant as well. // // TODO-CQ: This VN invariance check should not be necessary and in some cases it is conservative - it // is possible that the SSA def is outside the loop but VN does not understand what the node is doing // (e.g. LCL_FLD-based type reinterpretation) and assigns a "new, unique VN" to the node. This VN is // associated with the block where the node is, a loop block, and thus the VN is considered to not be // invariant. // On the other hand, it is possible for a SSA def to be inside the loop yet the use to be invariant, // if the defining expression is also invariant. In such a case the VN invariance would help but it is // blocked by the SSA invariance check. 
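                // Illustrative (hypothetical) example: in
                //     for (...) { t = a + b; ... use of t ... }
                // where 'a' and 'b' are defined outside the loop, the SSA def of 't' is inside
                // the loop, so uses of 't' are rejected here even though their value would be
                // the same on every iteration.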
isInvariant = isInvariant && IsTreeVNInvariant(tree); Value& top = m_valueStack.TopRef(); assert(top.Node() == tree); if (isInvariant) { top.m_invariant = true; // In general it doesn't make sense to hoist a local node but there are exceptions, for example // LCL_FLD nodes (because then the variable cannot be enregistered and the node always turns // into a memory access). top.m_hoistable = IsNodeHoistable(tree); } #ifdef DEBUG if (!isInvariant) { top.m_failReason = "local, not rvalue / not in SSA / defined within current loop"; } else if (!top.m_hoistable) { top.m_failReason = "not handled by cse"; } #endif return fgWalkResult::WALK_CONTINUE; } // Initclass CLS_VARs and IconHandles are the base cases of cctor dependent trees. // In the IconHandle case, it's of course the dereference, rather than the constant itself, that is // truly dependent on the cctor. So a more precise approach would be to separately propagate // isCctorDependent and isAddressWhoseDereferenceWouldBeCctorDependent, but we don't for // simplicity/throughput; the constant itself would be considered non-hoistable anyway, since // optIsCSEcandidate returns false for constants. bool treeIsCctorDependent = ((tree->OperIs(GT_CLS_VAR) && ((tree->gtFlags & GTF_CLS_VAR_INITCLASS) != 0)) || (tree->OperIs(GT_CNS_INT) && ((tree->gtFlags & GTF_ICON_INITCLASS) != 0))); bool treeIsInvariant = true; bool treeHasHoistableChildren = false; int childCount; #ifdef DEBUG const char* failReason = "unknown"; #endif for (childCount = 0; m_valueStack.TopRef(childCount).Node() != tree; childCount++) { Value& child = m_valueStack.TopRef(childCount); if (child.m_hoistable) { treeHasHoistableChildren = true; } if (!child.m_invariant) { treeIsInvariant = false; INDEBUG(failReason = "variant child";) } if (child.m_cctorDependent) { // Normally, a parent of a cctor-dependent tree is also cctor-dependent. treeIsCctorDependent = true; // Check for the case where we can stop propagating cctor-dependent upwards. if (tree->OperIs(GT_COMMA) && (child.Node() == tree->gtGetOp2())) { GenTree* op1 = tree->gtGetOp1(); if (op1->OperIs(GT_CALL)) { GenTreeCall* call = op1->AsCall(); if ((call->gtCallType == CT_HELPER) && s_helperCallProperties.MayRunCctor(eeGetHelperNum(call->gtCallMethHnd))) { // Hoisting the comma is ok because it would hoist the initialization along // with the static field reference. treeIsCctorDependent = false; // Hoisting the static field without hoisting the initialization would be // incorrect, make sure we consider the field (which we flagged as // cctor-dependent) non-hoistable. noway_assert(!child.m_hoistable); } } } } } // If all the children of "tree" are hoistable, then "tree" itself can be hoisted, // unless it has a static var reference that can't be hoisted past its cctor call. bool treeIsHoistable = treeIsInvariant && !treeIsCctorDependent; #ifdef DEBUG if (treeIsInvariant && !treeIsHoistable) { failReason = "cctor dependent"; } #endif // But we must see if anything else prevents "tree" from being hoisted. // if (treeIsInvariant) { if (treeIsHoistable) { treeIsHoistable = IsNodeHoistable(tree); if (!treeIsHoistable) { INDEBUG(failReason = "not handled by cse";) } } // If it's a call, it must be a helper call, and be pure. // Further, if it may run a cctor, it must be labeled as "Hoistable" // (meaning it won't run a cctor because the class is not precise-init). 
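                // For example (illustration only): a pure helper with no visible side effects
                // can be hoisted, while a helper that might trigger a static constructor is
                // only hoistable when the importer marked the call with GTF_CALL_HOISTABLE.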
if (treeIsHoistable && tree->IsCall()) { GenTreeCall* call = tree->AsCall(); if (call->gtCallType != CT_HELPER) { INDEBUG(failReason = "non-helper call";) treeIsHoistable = false; } else { CorInfoHelpFunc helpFunc = eeGetHelperNum(call->gtCallMethHnd); if (!s_helperCallProperties.IsPure(helpFunc)) { INDEBUG(failReason = "impure helper call";) treeIsHoistable = false; } else if (s_helperCallProperties.MayRunCctor(helpFunc) && ((call->gtFlags & GTF_CALL_HOISTABLE) == 0)) { INDEBUG(failReason = "non-hoistable helper call";) treeIsHoistable = false; } } } if (treeIsHoistable) { if (!m_beforeSideEffect) { // For now, we give up on an expression that might raise an exception if it is after the // first possible global side effect (and we assume we're after that if we're not in the first // block). // TODO-CQ: this is when we might do loop cloning. // if ((tree->gtFlags & GTF_EXCEPT) != 0) { INDEBUG(failReason = "side effect ordering constraint";) treeIsHoistable = false; } } } // Is the value of the whole tree loop invariant? treeIsInvariant = IsTreeVNInvariant(tree); // Is the value of the whole tree loop invariant? if (!treeIsInvariant) { // Here we have a tree that is not loop invariant and we thus cannot hoist INDEBUG(failReason = "tree VN is loop variant";) treeIsHoistable = false; } } // Next check if we need to set 'm_beforeSideEffect' to false. // // If we have already set it to false then we can skip these checks // if (m_beforeSideEffect) { // Is the value of the whole tree loop invariant? if (!treeIsInvariant) { // We have a tree that is not loop invariant and we thus cannot hoist assert(treeIsHoistable == false); // Check if we should clear m_beforeSideEffect. // If 'tree' can throw an exception then we need to set m_beforeSideEffect to false. // Note that calls are handled below if (tree->OperMayThrow(m_compiler) && !tree->IsCall()) { m_beforeSideEffect = false; } } // In the section below, we only care about memory side effects. We assume that expressions will // be hoisted so that they are evaluated in the same order as they would have been in the loop, // and therefore throw exceptions in the same order. // if (tree->IsCall()) { // If it's a call, it must be a helper call that does not mutate the heap. // Further, if it may run a cctor, it must be labeled as "Hoistable" // (meaning it won't run a cctor because the class is not precise-init). GenTreeCall* call = tree->AsCall(); if (call->gtCallType != CT_HELPER) { m_beforeSideEffect = false; } else { CorInfoHelpFunc helpFunc = eeGetHelperNum(call->gtCallMethHnd); if (s_helperCallProperties.MutatesHeap(helpFunc)) { m_beforeSideEffect = false; } else if (s_helperCallProperties.MayRunCctor(helpFunc) && (call->gtFlags & GTF_CALL_HOISTABLE) == 0) { m_beforeSideEffect = false; } // Additional check for helper calls that throw exceptions if (!treeIsInvariant) { // We have a tree that is not loop invariant and we thus cannot hoist assert(treeIsHoistable == false); // Does this helper call throw? if (!s_helperCallProperties.NoThrow(helpFunc)) { m_beforeSideEffect = false; } } } } else if (tree->OperIs(GT_ASG)) { // If the LHS of the assignment has a global reference, then assume it's a global side effect. 
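                    // For instance (illustration only): a store to a static field or through an
                    // indirection carries GTF_GLOB_REF on its LHS; once such a store is seen,
                    // later exception-throwing trees in the loop are not hoisted ahead of it.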
GenTree* lhs = tree->AsOp()->gtOp1; if (lhs->gtFlags & GTF_GLOB_REF) { m_beforeSideEffect = false; } } else if (tree->OperIs(GT_XADD, GT_XORR, GT_XAND, GT_XCHG, GT_LOCKADD, GT_CMPXCHG, GT_MEMORYBARRIER)) { // If this node is a MEMORYBARRIER or an Atomic operation // then don't hoist and stop any further hoisting after this node INDEBUG(failReason = "atomic op or memory barrier";) treeIsHoistable = false; m_beforeSideEffect = false; } } // If this 'tree' is hoistable then we return and the caller will // decide to hoist it as part of larger hoistable expression. // if (!treeIsHoistable && treeHasHoistableChildren) { // The current tree is not hoistable but it has hoistable children that we need // to hoist now. // // In order to preserve the original execution order, we also need to hoist any // other hoistable trees that we encountered so far. // At this point the stack contains (in top to bottom order): // - the current node's children // - the current node // - ancestors of the current node and some of their descendants // // The ancestors have not been visited yet in post order so they're not hoistable // (and they cannot become hoistable because the current node is not) but some of // their descendants may have already been traversed and be hoistable. // // The execution order is actually bottom to top so we'll start hoisting from // the bottom of the stack, skipping the current node (which is expected to not // be hoistable). // // Note that the treeHasHoistableChildren check avoids unnecessary stack traversing // and also prevents hoisting trees too early. If the current tree is not hoistable // and it doesn't have any hoistable children then there's no point in hoisting any // other trees. Doing so would interfere with the cctor dependent case, where the // cctor dependent node is initially not hoistable and may become hoistable later, // when its parent comma node is visited. // for (int i = 0; i < m_valueStack.Height(); i++) { Value& value = m_valueStack.BottomRef(i); if (value.m_hoistable) { assert(value.Node() != tree); // Don't hoist this tree again. value.m_hoistable = false; value.m_invariant = false; m_compiler->optHoistCandidate(value.Node(), m_currentBlock, m_loopNum, m_hoistContext); } else if (value.Node() != tree) { JITDUMP(" [%06u] not %s: %s\n", dspTreeID(value.Node()), value.m_invariant ? "invariant" : "hoistable", value.m_failReason); } } } m_valueStack.Pop(childCount); Value& top = m_valueStack.TopRef(); assert(top.Node() == tree); top.m_hoistable = treeIsHoistable; top.m_cctorDependent = treeIsCctorDependent; top.m_invariant = treeIsInvariant; #ifdef DEBUG if (!top.m_invariant || !top.m_hoistable) { top.m_failReason = failReason; } #endif return fgWalkResult::WALK_CONTINUE; } }; LoopDsc* loopDsc = &optLoopTable[loopNum]; assert(blocks->Top() == loopDsc->lpEntry); HoistVisitor visitor(this, loopNum, hoistContext); while (!blocks->Empty()) { BasicBlock* block = blocks->Pop(); weight_t blockWeight = block->getBBWeight(this); JITDUMP("\n optHoistLoopBlocks " FMT_BB " (weight=%6s) of loop " FMT_LP " <" FMT_BB ".." 
FMT_BB ">\n", block->bbNum, refCntWtd2str(blockWeight), loopNum, loopDsc->lpTop->bbNum, loopDsc->lpBottom->bbNum); if (blockWeight < (BB_UNITY_WEIGHT / 10)) { JITDUMP(" block weight is too small to perform hoisting.\n"); continue; } visitor.HoistBlock(block); } } void Compiler::optHoistCandidate(GenTree* tree, BasicBlock* treeBb, unsigned lnum, LoopHoistContext* hoistCtxt) { assert(lnum != BasicBlock::NOT_IN_LOOP); // It must pass the hoistable profitablity tests for this loop level if (!optIsProfitableToHoistTree(tree, lnum)) { JITDUMP(" ... not profitable to hoist\n"); return; } if (hoistCtxt->m_hoistedInParentLoops.Lookup(tree->gtVNPair.GetLiberal())) { JITDUMP(" ... already hoisted same VN in parent\n"); // already hoisted in a parent loop, so don't hoist this expression. return; } if (hoistCtxt->GetHoistedInCurLoop(this)->Lookup(tree->gtVNPair.GetLiberal())) { JITDUMP(" ... already hoisted same VN in current\n"); // already hoisted this expression in the current loop, so don't hoist this expression. return; } // Create a loop pre-header in which to put the hoisted code. fgCreateLoopPreHeader(lnum); // If the block we're hoisting from and the pre-header are in different EH regions, don't hoist. // TODO: we could probably hoist things that won't raise exceptions, such as constants. if (!BasicBlock::sameTryRegion(optLoopTable[lnum].lpHead, treeBb)) { JITDUMP(" ... not hoisting in " FMT_LP ", eh region constraint (pre-header try index %d, candidate " FMT_BB " try index %d\n", lnum, optLoopTable[lnum].lpHead->bbTryIndex, treeBb->bbNum, treeBb->bbTryIndex); return; } // Expression can be hoisted optPerformHoistExpr(tree, treeBb, lnum); // Increment lpHoistedExprCount or lpHoistedFPExprCount if (!varTypeIsFloating(tree->TypeGet())) { optLoopTable[lnum].lpHoistedExprCount++; #ifndef TARGET_64BIT // For our 32-bit targets Long types take two registers. if (varTypeIsLong(tree->TypeGet())) { optLoopTable[lnum].lpHoistedExprCount++; } #endif } else // Floating point expr hoisted { optLoopTable[lnum].lpHoistedFPExprCount++; } // Record the hoisted expression in hoistCtxt hoistCtxt->GetHoistedInCurLoop(this)->Set(tree->gtVNPair.GetLiberal(), true); } bool Compiler::optVNIsLoopInvariant(ValueNum vn, unsigned lnum, VNSet* loopVnInvariantCache) { // If it is not a VN, is not loop-invariant. if (vn == ValueNumStore::NoVN) { return false; } // We'll always short-circuit constants. if (vnStore->IsVNConstant(vn) || vn == vnStore->VNForVoid()) { return true; } // If we've done this query previously, don't repeat. bool previousRes = false; if (loopVnInvariantCache->Lookup(vn, &previousRes)) { return previousRes; } bool res = true; VNFuncApp funcApp; if (vnStore->GetVNFunc(vn, &funcApp)) { if (funcApp.m_func == VNF_PhiDef) { // Is the definition within the loop? If so, is not loop-invariant. unsigned lclNum = funcApp.m_args[0]; unsigned ssaNum = funcApp.m_args[1]; LclSsaVarDsc* ssaDef = lvaTable[lclNum].GetPerSsaData(ssaNum); res = !optLoopContains(lnum, ssaDef->GetBlock()->bbNatLoopNum); } else if (funcApp.m_func == VNF_PhiMemoryDef) { BasicBlock* defnBlk = reinterpret_cast<BasicBlock*>(vnStore->ConstantValue<ssize_t>(funcApp.m_args[0])); res = !optLoopContains(lnum, defnBlk->bbNatLoopNum); } else if (funcApp.m_func == VNF_MemOpaque) { const unsigned vnLoopNum = funcApp.m_args[0]; // Check for the special "ambiguous" loop MemOpaque VN. // This is considered variant in every loop. 
// if (vnLoopNum == BasicBlock::MAX_LOOP_NUM) { res = false; } else { res = !optLoopContains(lnum, vnLoopNum); } } else { for (unsigned i = 0; i < funcApp.m_arity; i++) { // 4th arg of mapStore identifies the loop where the store happens. // if (funcApp.m_func == VNF_MapStore) { assert(funcApp.m_arity == 4); if (i == 3) { const unsigned vnLoopNum = funcApp.m_args[3]; res = !optLoopContains(lnum, vnLoopNum); break; } } // TODO-CQ: We need to either make sure that *all* VN functions // always take VN args, or else have a list of arg positions to exempt, as implicitly // constant. if (!optVNIsLoopInvariant(funcApp.m_args[i], lnum, loopVnInvariantCache)) { res = false; break; } } } } loopVnInvariantCache->Set(vn, res); return res; } //------------------------------------------------------------------------------ // fgCreateLoopPreHeader: Creates a pre-header block for the given loop. // A pre-header is a block outside the loop that falls through or branches to the loop // entry block. It is the only non-loop predecessor block to the entry block (thus, it // dominates the entry block). The pre-header replaces the current lpHead in the loop table. // The pre-header will be placed immediately before the loop top block, which is the first // block of the loop in program order. // // Once a loop has a pre-header, calling this function will immediately return without // creating another. // // If there already exists a block that meets the pre-header requirements, that block is marked // as a pre-header, and no flow graph modification is made. // // Note that the pre-header block can be in a different EH region from blocks in the loop, including the // entry block. Code doing hoisting is required to check the EH legality of hoisting to the pre-header // before doing so. // // Since the flow graph has changed, if needed, fgUpdateChangedFlowGraph() should be called after this // to update the block numbers, reachability, and dominators. The loop table does not need to be rebuilt. // The new pre-header block does have a copy of the previous 'head' reachability set, but the pre-header // itself doesn't exist in any reachability/dominator sets. `fgDominate` has code to specifically // handle queries about the pre-header dominating other blocks, even without re-computing dominators. // The preds lists have been maintained. // // Currently, if you create a pre-header but don't put any code in it, any subsequent fgUpdateFlowGraph() // pass might choose to compact the empty pre-header with a predecessor block. That is, a pre-header // block might disappear if not used. // // The code does not depend on the order of the BasicBlock bbNum. // // Arguments: // lnum - loop index // void Compiler::fgCreateLoopPreHeader(unsigned lnum) { #ifdef DEBUG if (verbose) { printf("*************** In fgCreateLoopPreHeader for " FMT_LP "\n", lnum); } #endif // DEBUG LoopDsc& loop = optLoopTable[lnum]; // Have we already created a loop-preheader block? if (loop.lpFlags & LPFLG_HAS_PREHEAD) { JITDUMP(" pre-header already exists\n"); INDEBUG(loop.lpValidatePreHeader()); return; } BasicBlock* head = loop.lpHead; BasicBlock* top = loop.lpTop; BasicBlock* entry = loop.lpEntry; // Ensure that lpHead always dominates lpEntry noway_assert(fgDominate(head, entry)); // If `head` is already a valid pre-header, then mark it so. if (head->GetUniqueSucc() == entry) { // The loop entry must have a single non-loop predecessor, which is the pre-header. 
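        // Sketch of the shape being recognized (illustrative):
        //
        //    head --> entry <-- back-edges only from blocks inside the loop
        //
        // i.e. 'head' falls into or jumps only to 'entry', and every other predecessor of
        // 'entry' is a loop block, so 'head' can simply be marked as the pre-header
        // (subject to the EH region check below).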
bool loopHasProperEntryBlockPreds = true; for (BasicBlock* const predBlock : entry->PredBlocks()) { if (head == predBlock) { continue; } const bool intraLoopPred = optLoopContains(lnum, predBlock->bbNatLoopNum); if (!intraLoopPred) { loopHasProperEntryBlockPreds = false; break; } } if (loopHasProperEntryBlockPreds) { // Does this existing region have the same EH region index that we will use when we create the pre-header? // If not, we want to create a new pre-header with the expected region. bool headHasCorrectEHRegion = false; if ((top->bbFlags & BBF_TRY_BEG) != 0) { assert(top->hasTryIndex()); unsigned newTryIndex = ehTrueEnclosingTryIndexIL(top->getTryIndex()); unsigned compareTryIndex = head->hasTryIndex() ? head->getTryIndex() : EHblkDsc::NO_ENCLOSING_INDEX; headHasCorrectEHRegion = newTryIndex == compareTryIndex; } else { headHasCorrectEHRegion = BasicBlock::sameTryRegion(head, top); } if (headHasCorrectEHRegion) { JITDUMP(" converting existing header " FMT_BB " into pre-header\n", head->bbNum); loop.lpFlags |= LPFLG_HAS_PREHEAD; assert((head->bbFlags & BBF_LOOP_PREHEADER) == 0); // It isn't already a loop pre-header head->bbFlags |= BBF_LOOP_PREHEADER; INDEBUG(loop.lpValidatePreHeader()); INDEBUG(fgDebugCheckLoopTable()); return; } else { JITDUMP(" existing head " FMT_BB " doesn't have correct EH region\n", head->bbNum); } } else { JITDUMP(" existing head " FMT_BB " isn't unique non-loop predecessor of loop entry\n", head->bbNum); } } else { JITDUMP(" existing head " FMT_BB " doesn't have unique successor branching to loop entry\n", head->bbNum); } // Allocate a new basic block for the pre-header. const bool isTopEntryLoop = loop.lpIsTopEntry(); BasicBlock* preHead = bbNewBasicBlock(isTopEntryLoop ? BBJ_NONE : BBJ_ALWAYS); preHead->bbFlags |= BBF_INTERNAL | BBF_LOOP_PREHEADER; if (!isTopEntryLoop) { preHead->bbJumpDest = entry; } // Must set IL code offset preHead->bbCodeOffs = top->bbCodeOffs; // Set the default value of the preHead weight in case we don't have // valid profile data and since this blocks weight is just an estimate // we clear any BBF_PROF_WEIGHT flag that we may have picked up from head. // preHead->inheritWeight(head); preHead->bbFlags &= ~BBF_PROF_WEIGHT; // Copy the bbReach set from head for the new preHead block preHead->bbReach = BlockSetOps::MakeEmpty(this); BlockSetOps::Assign(this, preHead->bbReach, head->bbReach); // Also include 'head' in the preHead bbReach set BlockSetOps::AddElemD(this, preHead->bbReach, head->bbNum); #ifdef DEBUG if (verbose) { printf("\nCreated PreHeader (" FMT_BB ") for loop " FMT_LP " (" FMT_BB " - " FMT_BB, preHead->bbNum, lnum, top->bbNum, loop.lpBottom->bbNum); if (!isTopEntryLoop) { printf(", entry " FMT_BB, entry->bbNum); } printf("), with weight = %s\n", refCntWtd2str(preHead->getBBWeight(this))); } #endif // The preheader block is part of the containing loop (if any). preHead->bbNatLoopNum = loop.lpParent; if (fgIsUsingProfileWeights() && (head->bbJumpKind == BBJ_COND)) { if ((head->bbWeight == BB_ZERO_WEIGHT) || (entry->bbWeight == BB_ZERO_WEIGHT)) { preHead->bbWeight = BB_ZERO_WEIGHT; preHead->bbFlags |= BBF_RUN_RARELY; } else { // Allow for either the fall-through or branch to target 'entry'. 
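            // Worked example (illustrative numbers only): if 'head' has weight 100 and the
            // edge weights show ~75 entering the loop and ~25 skipping it, the taken ratio
            // is 0.75 and the pre-header is given a weight of about 75.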
BasicBlock* skipLoopBlock; if (head->bbNext == entry) { skipLoopBlock = head->bbJumpDest; } else { skipLoopBlock = head->bbNext; } assert(skipLoopBlock != entry); bool allValidProfileWeights = (head->hasProfileWeight() && skipLoopBlock->hasProfileWeight() && entry->hasProfileWeight()); if (allValidProfileWeights) { weight_t loopEnteredCount = 0; weight_t loopSkippedCount = 0; bool useEdgeWeights = fgHaveValidEdgeWeights; if (useEdgeWeights) { const flowList* edgeToEntry = fgGetPredForBlock(entry, head); const flowList* edgeToSkipLoop = fgGetPredForBlock(skipLoopBlock, head); noway_assert(edgeToEntry != nullptr); noway_assert(edgeToSkipLoop != nullptr); loopEnteredCount = (edgeToEntry->edgeWeightMin() + edgeToEntry->edgeWeightMax()) / 2.0; loopSkippedCount = (edgeToSkipLoop->edgeWeightMin() + edgeToSkipLoop->edgeWeightMax()) / 2.0; // Watch out for cases where edge weights were not properly maintained // so that it appears no profile flow enters the loop. // useEdgeWeights = !fgProfileWeightsConsistent(loopEnteredCount, BB_ZERO_WEIGHT); } if (!useEdgeWeights) { loopEnteredCount = entry->bbWeight; loopSkippedCount = skipLoopBlock->bbWeight; } weight_t loopTakenRatio = loopEnteredCount / (loopEnteredCount + loopSkippedCount); JITDUMP("%s edge weights; loopEnterCount " FMT_WT " loopSkipCount " FMT_WT " taken ratio " FMT_WT "\n", fgHaveValidEdgeWeights ? (useEdgeWeights ? "valid" : "ignored") : "invalid", loopEnteredCount, loopSkippedCount, loopTakenRatio); // Calculate a good approximation of the preHead's block weight weight_t preHeadWeight = (head->bbWeight * loopTakenRatio); preHead->setBBProfileWeight(preHeadWeight); noway_assert(!preHead->isRunRarely()); } } } // Link in the preHead block fgInsertBBbefore(top, preHead); // Ideally we would re-run SSA and VN if we optimized by doing loop hoisting. // However, that is too expensive at this point. Instead, we update the phi // node block references, if we created pre-header block due to hoisting. // This is sufficient because any definition participating in SSA that flowed // into the phi via the loop header block will now flow through the preheader // block from the header block. for (Statement* const stmt : top->Statements()) { GenTree* tree = stmt->GetRootNode(); if (tree->OperGet() != GT_ASG) { break; } GenTree* op2 = tree->gtGetOp2(); if (op2->OperGet() != GT_PHI) { break; } for (GenTreePhi::Use& use : op2->AsPhi()->Uses()) { GenTreePhiArg* phiArg = use.GetNode()->AsPhiArg(); if (phiArg->gtPredBB == head) { phiArg->gtPredBB = preHead; } } } // In which EH region should the pre-header live? // // The pre-header block is added immediately before `top`. // // The `top` block cannot be the first block of a filter or handler: `top` must have a back-edge from a // BBJ_COND or BBJ_ALWAYS within the loop, and a filter or handler cannot be branched to like that. // // The `top` block can be the first block of a `try` region, and you can fall into or branch to the // first block of a `try` region. (For top-entry loops, `top` will both be the target of a back-edge // and a fall-through from the previous block.) // // If the `top` block is NOT the first block of a `try` region, the pre-header can simply extend the // `top` block region. // // If the `top` block IS the first block of a `try`, we find its parent region and use that. For mutual-protect // regions, we need to find the actual parent, as the block stores the most "nested" mutual region. 
For // non-mutual-protect regions, due to EH canonicalization, we are guaranteed that no other EH regions begin // on the same block, so looking to just the parent is sufficient. Note that we can't just extend the EH // region of `top` to the pre-header, because `top` will still be the target of backward branches from // within the loop. If those backward branches come from outside the `try` (say, only the top half of the loop // is a `try` region), then we can't branch to a non-first `try` region block (you always must entry the `try` // in the first block). // // Note that hoisting any code out of a try region, for example, to a pre-header block in a different // EH region, needs to ensure that no exceptions will be thrown. assert(!fgIsFirstBlockOfFilterOrHandler(top)); if ((top->bbFlags & BBF_TRY_BEG) != 0) { // `top` is the beginning of a try block. Figure out the EH region to use. assert(top->hasTryIndex()); unsigned short newTryIndex = (unsigned short)ehTrueEnclosingTryIndexIL(top->getTryIndex()); if (newTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) { // No EH try index. preHead->clearTryIndex(); } else { preHead->setTryIndex(newTryIndex); } // What handler region to use? Use the same handler region as `top`. preHead->copyHndIndex(top); } else { // `top` is not the beginning of a try block. Just extend the EH region to the pre-header. // We don't need to call `fgExtendEHRegionBefore()` because all the special handling that function // does it to account for `top` being the first block of a `try` or handler region, which we know // is not true. preHead->copyEHRegion(top); } // TODO-CQ: set dominators for this block, to allow loop optimizations requiring them // (e.g: hoisting expression in a loop with the same 'head' as this one) // Update the loop table loop.lpHead = preHead; loop.lpFlags |= LPFLG_HAS_PREHEAD; // The new block becomes the 'head' of the loop - update bbRefs and bbPreds. // All non-loop predecessors of 'entry' now jump to 'preHead'. preHead->bbRefs = 0; bool checkNestedLoops = false; for (BasicBlock* const predBlock : entry->PredBlocks()) { // Is the predBlock in the loop? // // We want to use: // const bool intraLoopPred = loop.lpContains(predBlock); // but we can't depend on the bbNum ordering. // // Previously, this code wouldn't redirect predecessors dominated by the entry. However, that can // lead to a case where non-loop predecessor is dominated by the loop entry, and that predecessor // continues to branch to the entry, not the new pre-header. This is normally ok for hoisting // because it will introduce an SSA PHI def within the loop, which will inhibit hoisting. However, // it complicates the definition of what a pre-header is. const bool intraLoopPred = optLoopContains(lnum, predBlock->bbNatLoopNum); if (intraLoopPred) { if (predBlock != loop.lpBottom) { checkNestedLoops = true; } continue; } switch (predBlock->bbJumpKind) { case BBJ_NONE: // This 'entry' predecessor that isn't dominated by 'entry' must be outside the loop, // meaning it must be fall-through to 'entry', and we must have a top-entry loop. 
noway_assert((entry == top) && (predBlock == head) && (predBlock->bbNext == preHead)); fgRemoveRefPred(entry, predBlock); fgAddRefPred(preHead, predBlock); break; case BBJ_COND: if (predBlock->bbJumpDest == entry) { predBlock->bbJumpDest = preHead; noway_assert(predBlock->bbNext != preHead); } else { noway_assert((entry == top) && (predBlock == head) && (predBlock->bbNext == preHead)); } fgRemoveRefPred(entry, predBlock); fgAddRefPred(preHead, predBlock); break; case BBJ_ALWAYS: case BBJ_EHCATCHRET: noway_assert(predBlock->bbJumpDest == entry); predBlock->bbJumpDest = preHead; fgRemoveRefPred(entry, predBlock); fgAddRefPred(preHead, predBlock); break; case BBJ_SWITCH: unsigned jumpCnt; jumpCnt = predBlock->bbJumpSwt->bbsCount; BasicBlock** jumpTab; jumpTab = predBlock->bbJumpSwt->bbsDstTab; do { assert(*jumpTab); if ((*jumpTab) == entry) { (*jumpTab) = preHead; fgRemoveRefPred(entry, predBlock); fgAddRefPred(preHead, predBlock); } } while (++jumpTab, --jumpCnt); UpdateSwitchTableTarget(predBlock, entry, preHead); break; default: noway_assert(!"Unexpected bbJumpKind"); break; } } flowList* const edgeToPreHeader = fgGetPredForBlock(preHead, head); noway_assert(edgeToPreHeader != nullptr); edgeToPreHeader->setEdgeWeights(preHead->bbWeight, preHead->bbWeight, preHead); noway_assert(fgGetPredForBlock(entry, preHead) == nullptr); flowList* const edgeFromPreHeader = fgAddRefPred(entry, preHead); edgeFromPreHeader->setEdgeWeights(preHead->bbWeight, preHead->bbWeight, entry); /* If we found at least one back-edge in the flowgraph pointing to the entry of the loop (other than the back-edge of the loop we are considering) then we likely have nested do-while loops with the same entry block and inserting the preheader block changes the head of all the nested loops. Now we will update this piece of information in the loop table, and mark all nested loops as having a preheader (the preheader block can be shared among all nested do-while loops with the same entry block). */ if (checkNestedLoops) { for (unsigned l = 0; l < optLoopCount; l++) { if (optLoopTable[l].lpHead == head) { // loop.lpHead was already changed from 'head' to 'preHead' noway_assert(l != lnum); // If it shares head, it must be a top-entry loop that shares top. noway_assert(optLoopTable[l].lpEntry == top); optUpdateLoopHead(l, optLoopTable[l].lpHead, preHead); optLoopTable[l].lpFlags |= LPFLG_HAS_PREHEAD; #ifdef DEBUG if (verbose) { printf("Same PreHeader (" FMT_BB ") can be used for loop " FMT_LP " (" FMT_BB " - " FMT_BB ")\n\n", preHead->bbNum, l, top->bbNum, optLoopTable[l].lpBottom->bbNum); } #endif } } } // We added a new block and altered the preds list; make sure the flow graph has been marked as being modified. 
assert(fgModified); #ifdef DEBUG fgDebugCheckBBlist(); fgVerifyHandlerTab(); fgDebugCheckLoopTable(); if (verbose) { JITDUMP("*************** After fgCreateLoopPreHeader for " FMT_LP "\n", lnum); fgDispBasicBlocks(); fgDispHandlerTab(); optPrintLoopTable(); } #endif } bool Compiler::optBlockIsLoopEntry(BasicBlock* blk, unsigned* pLnum) { for (unsigned lnum = blk->bbNatLoopNum; lnum != BasicBlock::NOT_IN_LOOP; lnum = optLoopTable[lnum].lpParent) { if (optLoopTable[lnum].lpFlags & LPFLG_REMOVED) { continue; } if (optLoopTable[lnum].lpEntry == blk) { *pLnum = lnum; return true; } } return false; } void Compiler::optComputeLoopSideEffects() { unsigned lnum; for (lnum = 0; lnum < optLoopCount; lnum++) { VarSetOps::AssignNoCopy(this, optLoopTable[lnum].lpVarInOut, VarSetOps::MakeEmpty(this)); VarSetOps::AssignNoCopy(this, optLoopTable[lnum].lpVarUseDef, VarSetOps::MakeEmpty(this)); optLoopTable[lnum].lpFlags &= ~LPFLG_CONTAINS_CALL; } for (lnum = 0; lnum < optLoopCount; lnum++) { if (optLoopTable[lnum].lpFlags & LPFLG_REMOVED) { continue; } if (optLoopTable[lnum].lpParent == BasicBlock::NOT_IN_LOOP) { // Is outermost... optComputeLoopNestSideEffects(lnum); } } VarSetOps::AssignNoCopy(this, lvaFloatVars, VarSetOps::MakeEmpty(this)); #ifndef TARGET_64BIT VarSetOps::AssignNoCopy(this, lvaLongVars, VarSetOps::MakeEmpty(this)); #endif for (unsigned i = 0; i < lvaCount; i++) { LclVarDsc* varDsc = lvaGetDesc(i); if (varDsc->lvTracked) { if (varTypeIsFloating(varDsc->lvType)) { VarSetOps::AddElemD(this, lvaFloatVars, varDsc->lvVarIndex); } #ifndef TARGET_64BIT else if (varTypeIsLong(varDsc->lvType)) { VarSetOps::AddElemD(this, lvaLongVars, varDsc->lvVarIndex); } #endif } } } void Compiler::optComputeLoopNestSideEffects(unsigned lnum) { JITDUMP("optComputeLoopNestSideEffects for " FMT_LP "\n", lnum); assert(optLoopTable[lnum].lpParent == BasicBlock::NOT_IN_LOOP); // Requires: lnum is outermost. for (BasicBlock* const bbInLoop : optLoopTable[lnum].LoopBlocks()) { if (!optComputeLoopSideEffectsOfBlock(bbInLoop)) { // When optComputeLoopSideEffectsOfBlock returns false, we encountered // a block that was moved into the loop range (by fgReorderBlocks), // but not marked correctly as being inside the loop. // We conservatively mark this loop (and any outer loops) // as having memory havoc side effects. // // Record that all loops containing this block have memory havoc effects. // optRecordLoopNestsMemoryHavoc(lnum, fullMemoryKindSet); // All done, no need to keep visiting more blocks break; } } } void Compiler::optRecordLoopNestsMemoryHavoc(unsigned lnum, MemoryKindSet memoryHavoc) { // We should start out with 'lnum' set to a valid natural loop index assert(lnum != BasicBlock::NOT_IN_LOOP); while (lnum != BasicBlock::NOT_IN_LOOP) { for (MemoryKind memoryKind : allMemoryKinds()) { if ((memoryHavoc & memoryKindSet(memoryKind)) != 0) { optLoopTable[lnum].lpLoopHasMemoryHavoc[memoryKind] = true; } } // Move lnum to the next outtermost loop that we need to mark lnum = optLoopTable[lnum].lpParent; } } bool Compiler::optComputeLoopSideEffectsOfBlock(BasicBlock* blk) { unsigned mostNestedLoop = blk->bbNatLoopNum; JITDUMP("optComputeLoopSideEffectsOfBlock " FMT_BB ", mostNestedLoop %d\n", blk->bbNum, mostNestedLoop); if (mostNestedLoop == BasicBlock::NOT_IN_LOOP) { return false; } AddVariableLivenessAllContainingLoops(mostNestedLoop, blk); // MemoryKinds for which an in-loop call or store has arbitrary effects. 
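    // For example (illustration only): a volatile store or a call to an arbitrary user method
    // makes both GcHeap and ByrefExposed havoc, while a store to an array element with a known
    // element type only records that element type as modified and conservatively marks
    // ByrefExposed.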
MemoryKindSet memoryHavoc = emptyMemoryKindSet; // Now iterate over the remaining statements, and their trees. for (Statement* const stmt : blk->NonPhiStatements()) { for (GenTree* const tree : stmt->TreeList()) { genTreeOps oper = tree->OperGet(); // Even after we set memoryHavoc we still may want to know if a loop contains calls if (memoryHavoc == fullMemoryKindSet) { if (oper == GT_CALL) { // Record that this loop contains a call AddContainsCallAllContainingLoops(mostNestedLoop); } // If we just set LPFLG_CONTAINS_CALL or it was previously set if (optLoopTable[mostNestedLoop].lpFlags & LPFLG_CONTAINS_CALL) { // We can early exit after both memoryHavoc and LPFLG_CONTAINS_CALL are both set to true. break; } // We are just looking for GT_CALL nodes after memoryHavoc was set. continue; } // otherwise memoryHavoc is not set for at least one heap ID assert(memoryHavoc != fullMemoryKindSet); // This body is a distillation of the memory side-effect code of value numbering. // We also do a very limited analysis if byref PtrTo values, to cover some cases // that the compiler creates. if (oper == GT_ASG) { GenTree* lhs = tree->AsOp()->gtOp1->gtEffectiveVal(/*commaOnly*/ true); if (lhs->OperGet() == GT_IND) { GenTree* arg = lhs->AsOp()->gtOp1->gtEffectiveVal(/*commaOnly*/ true); FieldSeqNode* fldSeqArrElem = nullptr; if ((tree->gtFlags & GTF_IND_VOLATILE) != 0) { memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed); continue; } ArrayInfo arrInfo; if (arg->TypeGet() == TYP_BYREF && arg->OperGet() == GT_LCL_VAR) { // If it's a local byref for which we recorded a value number, use that... GenTreeLclVar* argLcl = arg->AsLclVar(); if (lvaInSsa(argLcl->GetLclNum()) && argLcl->HasSsaName()) { ValueNum argVN = lvaTable[argLcl->GetLclNum()].GetPerSsaData(argLcl->GetSsaNum())->m_vnPair.GetLiberal(); VNFuncApp funcApp; if (argVN != ValueNumStore::NoVN && vnStore->GetVNFunc(argVN, &funcApp) && funcApp.m_func == VNF_PtrToArrElem) { assert(vnStore->IsVNHandle(funcApp.m_args[0])); CORINFO_CLASS_HANDLE elemType = CORINFO_CLASS_HANDLE(vnStore->ConstantValue<size_t>(funcApp.m_args[0])); AddModifiedElemTypeAllContainingLoops(mostNestedLoop, elemType); // Don't set memoryHavoc for GcHeap below. Do set memoryHavoc for ByrefExposed // (conservatively assuming that a byref may alias the array element) memoryHavoc |= memoryKindSet(ByrefExposed); continue; } } // Otherwise... memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed); } // Is the LHS an array index expression? else if (lhs->ParseArrayElemForm(this, &arrInfo, &fldSeqArrElem)) { // We actually ignore "fldSeq" -- any modification to an S[], at any // field of "S", will lose all information about the array type. CORINFO_CLASS_HANDLE elemTypeEq = EncodeElemType(arrInfo.m_elemType, arrInfo.m_elemStructType); AddModifiedElemTypeAllContainingLoops(mostNestedLoop, elemTypeEq); // Conservatively assume byrefs may alias this array element memoryHavoc |= memoryKindSet(ByrefExposed); } else { GenTree* baseAddr = nullptr; FieldSeqNode* fldSeq = nullptr; if (arg->IsFieldAddr(this, &baseAddr, &fldSeq)) { assert((fldSeq != nullptr) && (fldSeq != FieldSeqStore::NotAField()) && !fldSeq->IsPseudoField()); FieldKindForVN fieldKind = (baseAddr != nullptr) ? FieldKindForVN::WithBaseAddr : FieldKindForVN::SimpleStatic; AddModifiedFieldAllContainingLoops(mostNestedLoop, fldSeq->GetFieldHandle(), fieldKind); // Conservatively assume byrefs may alias this object. 
memoryHavoc |= memoryKindSet(ByrefExposed); } else { memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed); } } } else if (lhs->OperIsBlk()) { GenTreeLclVarCommon* lclVarTree; bool isEntire; if (!tree->DefinesLocal(this, &lclVarTree, &isEntire)) { // For now, assume arbitrary side effects on GcHeap/ByrefExposed... memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed); } else if (lvaVarAddrExposed(lclVarTree->GetLclNum())) { memoryHavoc |= memoryKindSet(ByrefExposed); } } else if (lhs->OperGet() == GT_CLS_VAR) { AddModifiedFieldAllContainingLoops(mostNestedLoop, lhs->AsClsVar()->gtClsVarHnd, FieldKindForVN::SimpleStatic); // Conservatively assume byrefs may alias this static field memoryHavoc |= memoryKindSet(ByrefExposed); } // Otherwise, must be local lhs form. I should assert that. else if (lhs->OperGet() == GT_LCL_VAR) { GenTreeLclVar* lhsLcl = lhs->AsLclVar(); GenTree* rhs = tree->AsOp()->gtOp2; ValueNum rhsVN = rhs->gtVNPair.GetLiberal(); // If we gave the RHS a value number, propagate it. if (rhsVN != ValueNumStore::NoVN) { rhsVN = vnStore->VNNormalValue(rhsVN); if (lvaInSsa(lhsLcl->GetLclNum()) && lhsLcl->HasSsaName()) { lvaTable[lhsLcl->GetLclNum()] .GetPerSsaData(lhsLcl->GetSsaNum()) ->m_vnPair.SetLiberal(rhsVN); } } // If the local is address-exposed, count this as ByrefExposed havoc if (lvaVarAddrExposed(lhsLcl->GetLclNum())) { memoryHavoc |= memoryKindSet(ByrefExposed); } } } else // if (oper != GT_ASG) { switch (oper) { case GT_COMMA: tree->gtVNPair = tree->AsOp()->gtOp2->gtVNPair; break; case GT_ADDR: // Is it an addr of a array index expression? { GenTree* addrArg = tree->AsOp()->gtOp1; if (addrArg->OperGet() == GT_IND) { // Is the LHS an array index expression? if (addrArg->gtFlags & GTF_IND_ARR_INDEX) { ArrayInfo arrInfo; bool b = GetArrayInfoMap()->Lookup(addrArg, &arrInfo); assert(b); CORINFO_CLASS_HANDLE elemTypeEq = EncodeElemType(arrInfo.m_elemType, arrInfo.m_elemStructType); ValueNum elemTypeEqVN = vnStore->VNForHandle(ssize_t(elemTypeEq), GTF_ICON_CLASS_HDL); ValueNum ptrToArrElemVN = vnStore->VNForFunc(TYP_BYREF, VNF_PtrToArrElem, elemTypeEqVN, // The rest are dummy arguments. vnStore->VNForNull(), vnStore->VNForNull(), vnStore->VNForNull()); tree->gtVNPair.SetBoth(ptrToArrElemVN); } } } break; #ifdef FEATURE_HW_INTRINSICS case GT_HWINTRINSIC: if (tree->AsHWIntrinsic()->OperIsMemoryStore()) { memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed); } break; #endif // FEATURE_HW_INTRINSICS case GT_LOCKADD: case GT_XORR: case GT_XAND: case GT_XADD: case GT_XCHG: case GT_CMPXCHG: case GT_MEMORYBARRIER: case GT_STORE_DYN_BLK: { memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed); } break; case GT_CALL: { GenTreeCall* call = tree->AsCall(); // Record that this loop contains a call AddContainsCallAllContainingLoops(mostNestedLoop); if (call->gtCallType == CT_HELPER) { CorInfoHelpFunc helpFunc = eeGetHelperNum(call->gtCallMethHnd); if (s_helperCallProperties.MutatesHeap(helpFunc)) { memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed); } else if (s_helperCallProperties.MayRunCctor(helpFunc)) { // If the call is labeled as "Hoistable", then we've checked the // class that would be constructed, and it is not precise-init, so // the cctor will not be run by this call. Otherwise, it might be, // and might have arbitrary side effects. if ((tree->gtFlags & GTF_CALL_HOISTABLE) == 0) { memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed); } } } else { memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed); } break; } default: // All other gtOper node kinds, leave 'memoryHavoc' unchanged (i.e. 
false) assert(!tree->OperRequiresAsgFlag()); break; } } } } if (memoryHavoc != emptyMemoryKindSet) { // Record that all loops containing this block have this kind of memoryHavoc effects. optRecordLoopNestsMemoryHavoc(mostNestedLoop, memoryHavoc); } return true; } // Marks the containsCall information to "lnum" and any parent loops. void Compiler::AddContainsCallAllContainingLoops(unsigned lnum) { #if FEATURE_LOOP_ALIGN // If this is the inner most loop, reset the LOOP_ALIGN flag // because a loop having call will not likely to benefit from // alignment if (optLoopTable[lnum].lpChild == BasicBlock::NOT_IN_LOOP) { BasicBlock* top = optLoopTable[lnum].lpTop; top->unmarkLoopAlign(this DEBUG_ARG("Loop with call")); } #endif assert(0 <= lnum && lnum < optLoopCount); while (lnum != BasicBlock::NOT_IN_LOOP) { optLoopTable[lnum].lpFlags |= LPFLG_CONTAINS_CALL; lnum = optLoopTable[lnum].lpParent; } } // Adds the variable liveness information for 'blk' to 'this' LoopDsc void Compiler::LoopDsc::AddVariableLiveness(Compiler* comp, BasicBlock* blk) { VarSetOps::UnionD(comp, this->lpVarInOut, blk->bbLiveIn); VarSetOps::UnionD(comp, this->lpVarInOut, blk->bbLiveOut); VarSetOps::UnionD(comp, this->lpVarUseDef, blk->bbVarUse); VarSetOps::UnionD(comp, this->lpVarUseDef, blk->bbVarDef); } // Adds the variable liveness information for 'blk' to "lnum" and any parent loops. void Compiler::AddVariableLivenessAllContainingLoops(unsigned lnum, BasicBlock* blk) { assert(0 <= lnum && lnum < optLoopCount); while (lnum != BasicBlock::NOT_IN_LOOP) { optLoopTable[lnum].AddVariableLiveness(this, blk); lnum = optLoopTable[lnum].lpParent; } } // Adds "fldHnd" to the set of modified fields of "lnum" and any parent loops. void Compiler::AddModifiedFieldAllContainingLoops(unsigned lnum, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind) { assert(0 <= lnum && lnum < optLoopCount); while (lnum != BasicBlock::NOT_IN_LOOP) { optLoopTable[lnum].AddModifiedField(this, fldHnd, fieldKind); lnum = optLoopTable[lnum].lpParent; } } // Adds "elemType" to the set of modified array element types of "lnum" and any parent loops. void Compiler::AddModifiedElemTypeAllContainingLoops(unsigned lnum, CORINFO_CLASS_HANDLE elemClsHnd) { assert(0 <= lnum && lnum < optLoopCount); while (lnum != BasicBlock::NOT_IN_LOOP) { optLoopTable[lnum].AddModifiedElemType(this, elemClsHnd); lnum = optLoopTable[lnum].lpParent; } } //------------------------------------------------------------------------------ // optRemoveRangeCheck : Given an indexing node, mark it as not needing a range check. // // Arguments: // check - Range check tree, the raw CHECK node (ARRAY, SIMD or HWINTRINSIC). // comma - GT_COMMA to which the "check" belongs, "nullptr" if the check is a standalone one. // stmt - Statement the indexing nodes belong to. // // Return Value: // Rewritten "check" - no-op if it has no side effects or the tree that contains them. // // Assumptions: // This method is capable of removing checks of two kinds: COMMA-based and standalone top-level ones. // In case of a COMMA-based check, "check" must be a non-null first operand of a non-null COMMA. // In case of a standalone check, "comma" must be null and "check" - "stmt"'s root. 
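// Example (illustrative IR shape only):
//    COMMA(BOUNDS_CHECK(index, length), IND(addr))
// If the check has no side effects it is bashed to a NOP; otherwise the extracted side
// effects replace it (or become the statement root for a standalone check), and the
// enclosing COMMA, if any, is marked as no longer CSE-able.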
// GenTree* Compiler::optRemoveRangeCheck(GenTreeBoundsChk* check, GenTree* comma, Statement* stmt) { #if !REARRANGE_ADDS noway_assert(!"can't remove range checks without REARRANGE_ADDS right now"); #endif noway_assert(stmt != nullptr); noway_assert((comma != nullptr && comma->OperIs(GT_COMMA) && comma->gtGetOp1() == check) || (check != nullptr && check->OperIs(GT_BOUNDS_CHECK) && comma == nullptr)); noway_assert(check->OperIs(GT_BOUNDS_CHECK)); GenTree* tree = comma != nullptr ? comma : check; #ifdef DEBUG if (verbose) { printf("Before optRemoveRangeCheck:\n"); gtDispTree(tree); } #endif // Extract side effects GenTree* sideEffList = nullptr; gtExtractSideEffList(check, &sideEffList, GTF_ASG); if (sideEffList != nullptr) { // We've got some side effects. if (tree->OperIs(GT_COMMA)) { // Make the comma handle them. tree->AsOp()->gtOp1 = sideEffList; } else { // Make the statement execute them instead of the check. stmt->SetRootNode(sideEffList); tree = sideEffList; } } else { check->gtBashToNOP(); } if (tree->OperIs(GT_COMMA)) { // TODO-CQ: We should also remove the GT_COMMA, but in any case we can no longer CSE the GT_COMMA. tree->gtFlags |= GTF_DONT_CSE; } gtUpdateSideEffects(stmt, tree); // Recalculate the GetCostSz(), etc... gtSetStmtInfo(stmt); // Re-thread the nodes if necessary if (fgStmtListThreaded) { fgSetStmtSeq(stmt); } #ifdef DEBUG if (verbose) { // gtUpdateSideEffects can update the side effects for ancestors in the tree, so display the whole statement // tree, not just the sub-tree. printf("After optRemoveRangeCheck for [%06u]:\n", dspTreeID(tree)); gtDispTree(stmt->GetRootNode()); } #endif return check; } //------------------------------------------------------------------------------ // optRemoveStandaloneRangeCheck : A thin wrapper over optRemoveRangeCheck that removes standalone checks. // // Arguments: // check - The standalone top-level CHECK node. // stmt - The statement "check" is a root node of. // // Return Value: // If "check" has no side effects, it is retuned, bashed to a no-op. // If it has side effects, the tree that executes them is returned. // GenTree* Compiler::optRemoveStandaloneRangeCheck(GenTreeBoundsChk* check, Statement* stmt) { assert(check != nullptr); assert(stmt != nullptr); assert(check == stmt->GetRootNode()); return optRemoveRangeCheck(check, nullptr, stmt); } //------------------------------------------------------------------------------ // optRemoveCommaBasedRangeCheck : A thin wrapper over optRemoveRangeCheck that removes COMMA-based checks. // // Arguments: // comma - GT_COMMA of which the first operand is the CHECK to be removed. // stmt - The statement "comma" belongs to. // void Compiler::optRemoveCommaBasedRangeCheck(GenTree* comma, Statement* stmt) { assert(comma != nullptr && comma->OperIs(GT_COMMA)); assert(stmt != nullptr); assert(comma->gtGetOp1()->OperIs(GT_BOUNDS_CHECK)); optRemoveRangeCheck(comma->gtGetOp1()->AsBoundsChk(), comma, stmt); } /***************************************************************************** * Return the scale in an array reference, given a pointer to the * multiplication node. 
*/ ssize_t Compiler::optGetArrayRefScaleAndIndex(GenTree* mul, GenTree** pIndex DEBUGARG(bool bRngChk)) { assert(mul); assert(mul->gtOper == GT_MUL || mul->gtOper == GT_LSH); assert(mul->AsOp()->gtOp2->IsCnsIntOrI()); ssize_t scale = mul->AsOp()->gtOp2->AsIntConCommon()->IconValue(); if (mul->gtOper == GT_LSH) { scale = ((ssize_t)1) << scale; } GenTree* index = mul->AsOp()->gtOp1; if (index->gtOper == GT_MUL && index->AsOp()->gtOp2->IsCnsIntOrI()) { // case of two cascading multiplications for constant int (e.g. * 20 morphed to * 5 * 4): // When index->gtOper is GT_MUL and index->AsOp()->gtOp2->gtOper is GT_CNS_INT (i.e. * 5), // we can bump up the scale from 4 to 5*4, and then change index to index->AsOp()->gtOp1. // Otherwise, we cannot optimize it. We will simply keep the original scale and index. scale *= index->AsOp()->gtOp2->AsIntConCommon()->IconValue(); index = index->AsOp()->gtOp1; } assert(!bRngChk || index->gtOper != GT_COMMA); if (pIndex) { *pIndex = index; } return scale; } //----------------------------------------------------------------------------- // OptTestInfo: Member of OptBoolsDsc struct used to test if a GT_JTRUE or GT_RETURN node // is a boolean comparison // struct OptTestInfo { GenTree* testTree; // The root node of basic block with GT_JTRUE or GT_RETURN type to check boolean condition on GenTree* compTree; // The compare node (i.e. GT_EQ or GT_NE node) of the testTree bool isBool; // If the compTree is boolean expression }; //----------------------------------------------------------------------------- // OptBoolsDsc: Descriptor used for Boolean Optimization // class OptBoolsDsc { public: OptBoolsDsc(BasicBlock* b1, BasicBlock* b2, Compiler* comp) { m_b1 = b1; m_b2 = b2; m_b3 = nullptr; m_comp = comp; } private: BasicBlock* m_b1; // The first basic block with the BBJ_COND conditional jump type BasicBlock* m_b2; // The next basic block of m_b1. Either BBJ_COND or BBJ_RETURN type BasicBlock* m_b3; // m_b1->bbJumpDest. Null if m_b2 is not a return block. Compiler* m_comp; // The pointer to the Compiler instance OptTestInfo m_testInfo1; // The first test info OptTestInfo m_testInfo2; // The second test info GenTree* m_t3; // The root node of the first statement of m_b3 GenTree* m_c1; // The first operand of m_testInfo1.compTree GenTree* m_c2; // The first operand of m_testInfo2.compTree bool m_sameTarget; // if m_b1 and m_b2 jumps to the same destination genTreeOps m_foldOp; // The fold operator (e.g., GT_AND or GT_OR) var_types m_foldType; // The type of the folded tree genTreeOps m_cmpOp; // The comparison operator (e.g., GT_EQ or GT_NE) public: bool optOptimizeBoolsCondBlock(); bool optOptimizeBoolsReturnBlock(BasicBlock* b3); #ifdef DEBUG void optOptimizeBoolsGcStress(); #endif private: Statement* optOptimizeBoolsChkBlkCond(); GenTree* optIsBoolComp(OptTestInfo* pOptTest); bool optOptimizeBoolsChkTypeCostCond(); void optOptimizeBoolsUpdateTrees(); }; //----------------------------------------------------------------------------- // optOptimizeBoolsCondBlock: Optimize boolean when bbJumpKind of both m_b1 and m_b2 are BBJ_COND // // Returns: // true if boolean optimization is done and m_b1 and m_b2 are folded into m_b1, else false. // // Notes: // m_b1 and m_b2 are set on entry. 
// // Case 1: if b1.bbJumpDest == b2.bbJumpDest, it transforms // B1 : brtrue(t1, Bx) // B2 : brtrue(t2, Bx) // B3 : // to // B1 : brtrue(t1|t2, BX) // B3 : // // For example, (x == 0 && y == 0 && z == 0) generates // B1: GT_JTRUE (BBJ_COND), jump to B4 // B2: GT_JTRUE (BBJ_COND), jump to B4 // B3: GT_RETURN (BBJ_RETURN) // B4: GT_RETURN (BBJ_RETURN) // and B1 and B2 are folded into B1: // B1: GT_JTRUE (BBJ_COND), jump to B4 // B3: GT_RETURN (BBJ_RETURN) // B4: GT_RETURN (BBJ_RETURN) // // Case 2: if B1.bbJumpDest == B2->bbNext, it transforms // B1 : brtrue(t1, B3) // B2 : brtrue(t2, Bx) // B3 : // to // B1 : brtrue((!t1) && t2, Bx) // B3 : // bool OptBoolsDsc::optOptimizeBoolsCondBlock() { assert(m_b1 != nullptr && m_b2 != nullptr && m_b3 == nullptr); // Check if m_b1 and m_b2 jump to the same target and get back pointers to m_testInfo1 and t2 tree nodes m_t3 = nullptr; // Check if m_b1 and m_b2 have the same bbJumpDest if (m_b1->bbJumpDest == m_b2->bbJumpDest) { // Given the following sequence of blocks : // B1: brtrue(t1, BX) // B2: brtrue(t2, BX) // B3: // we will try to fold it to : // B1: brtrue(t1|t2, BX) // B3: m_sameTarget = true; } else if (m_b1->bbJumpDest == m_b2->bbNext) { // Given the following sequence of blocks : // B1: brtrue(t1, B3) // B2: brtrue(t2, BX) // B3: // we will try to fold it to : // B1: brtrue((!t1)&&t2, BX) // B3: m_sameTarget = false; } else { return false; } Statement* const s1 = optOptimizeBoolsChkBlkCond(); if (s1 == nullptr) { return false; } // Find the branch conditions of m_b1 and m_b2 m_c1 = optIsBoolComp(&m_testInfo1); if (m_c1 == nullptr) { return false; } m_c2 = optIsBoolComp(&m_testInfo2); if (m_c2 == nullptr) { return false; } // Find the type and cost conditions of m_testInfo1 and m_testInfo2 if (!optOptimizeBoolsChkTypeCostCond()) { return false; } // Get the fold operator and the comparison operator genTreeOps foldOp; genTreeOps cmpOp; var_types foldType = m_c1->TypeGet(); if (varTypeIsGC(foldType)) { foldType = TYP_I_IMPL; } assert(m_testInfo1.compTree->gtOper == GT_EQ || m_testInfo1.compTree->gtOper == GT_NE); if (m_sameTarget) { // Both conditions must be the same if (m_testInfo1.compTree->gtOper != m_testInfo2.compTree->gtOper) { return false; } if (m_testInfo1.compTree->gtOper == GT_EQ) { // t1:c1==0 t2:c2==0 ==> Branch to BX if either value is 0 // So we will branch to BX if (c1&c2)==0 foldOp = GT_AND; cmpOp = GT_EQ; } else { // t1:c1!=0 t2:c2!=0 ==> Branch to BX if either value is non-0 // So we will branch to BX if (c1|c2)!=0 foldOp = GT_OR; cmpOp = GT_NE; } } else { // The m_b1 condition must be the reverse of the m_b2 condition because the only operators // that we will see here are GT_EQ and GT_NE. So, if they are not the same, we have one of each. if (m_testInfo1.compTree->gtOper == m_testInfo2.compTree->gtOper) { return false; } if (m_testInfo1.compTree->gtOper == GT_EQ) { // t1:c1==0 t2:c2!=0 ==> Branch to BX if both values are non-0 // So we will branch to BX if (c1&c2)!=0 foldOp = GT_AND; cmpOp = GT_NE; } else { // t1:c1!=0 t2:c2==0 ==> Branch to BX if both values are 0 // So we will branch to BX if (c1|c2)==0 foldOp = GT_OR; cmpOp = GT_EQ; } } // Anding requires both values to be 0 or 1 if ((foldOp == GT_AND) && (!m_testInfo1.isBool || !m_testInfo2.isBool)) { return false; } // // Now update the trees // m_foldOp = foldOp; m_foldType = foldType; m_cmpOp = cmpOp; optOptimizeBoolsUpdateTrees(); #ifdef DEBUG if (m_comp->verbose) { printf("Folded %sboolean conditions of " FMT_BB " and " FMT_BB " to :\n", m_c2->OperIsLeaf() ? 
"" : "non-leaf ", m_b1->bbNum, m_b2->bbNum); m_comp->gtDispStmt(s1); printf("\n"); } #endif // Return true to continue the bool optimization for the rest of the BB chain return true; } //----------------------------------------------------------------------------- // optOptimizeBoolsChkBlkCond: Checks block conditions if it can be boolean optimized // // Return: // If all conditions pass, returns the last statement of m_b1, else return nullptr. // // Notes: // This method checks if the second (and third block for cond/return/return case) contains only one statement, // and checks if tree operators are of the right type, e.g, GT_JTRUE, GT_RETURN. // // On entry, m_b1, m_b2 are set and m_b3 is set for cond/return/return case. // If it passes all the conditions, m_testInfo1.testTree, m_testInfo2.testTree and m_t3 are set // to the root nodes of m_b1, m_b2 and m_b3 each. // SameTarget is also updated to true if m_b1 and m_b2 jump to the same destination. // Statement* OptBoolsDsc::optOptimizeBoolsChkBlkCond() { assert(m_b1 != nullptr && m_b2 != nullptr); bool optReturnBlock = false; if (m_b3 != nullptr) { optReturnBlock = true; } // Find the block conditions of m_b1 and m_b2 if (m_b2->countOfInEdges() > 1 || (optReturnBlock && m_b3->countOfInEdges() > 1)) { return nullptr; } // Find the condition for the first block Statement* s1 = m_b1->lastStmt(); GenTree* testTree1 = s1->GetRootNode(); assert(testTree1->gtOper == GT_JTRUE); // The second and the third block must contain a single statement Statement* s2 = m_b2->firstStmt(); if (s2->GetPrevStmt() != s2) { return nullptr; } GenTree* testTree2 = s2->GetRootNode(); if (!optReturnBlock) { assert(testTree2->gtOper == GT_JTRUE); } else { if (testTree2->gtOper != GT_RETURN) { return nullptr; } Statement* s3 = m_b3->firstStmt(); if (s3->GetPrevStmt() != s3) { return nullptr; } GenTree* testTree3 = s3->GetRootNode(); if (testTree3->gtOper != GT_RETURN) { return nullptr; } if (!varTypeIsIntegral(testTree2->TypeGet()) || !varTypeIsIntegral(testTree3->TypeGet())) { return nullptr; } // The third block is Return with "CNS_INT int 0/1" if (testTree3->AsOp()->gtOp1->gtOper != GT_CNS_INT) { return nullptr; } if (testTree3->AsOp()->gtOp1->gtType != TYP_INT) { return nullptr; } m_t3 = testTree3; } m_testInfo1.testTree = testTree1; m_testInfo2.testTree = testTree2; return s1; } //----------------------------------------------------------------------------- // optOptimizeBoolsChkTypeCostCond: Checks if type conditions meet the folding condition, and // if cost to fold is not too expensive // // Return: // True if it meets type conditions and cost conditions. Else false. // bool OptBoolsDsc::optOptimizeBoolsChkTypeCostCond() { assert(m_testInfo1.compTree->OperIs(GT_EQ, GT_NE) && m_testInfo1.compTree->AsOp()->gtOp1 == m_c1); assert(m_testInfo2.compTree->OperIs(GT_EQ, GT_NE) && m_testInfo2.compTree->AsOp()->gtOp1 == m_c2); // // Leave out floats where the bit-representation is more complicated // - there are two representations for 0. // if (varTypeIsFloating(m_c1->TypeGet()) || varTypeIsFloating(m_c2->TypeGet())) { return false; } // Make sure the types involved are of the same sizes if (genTypeSize(m_c1->TypeGet()) != genTypeSize(m_c2->TypeGet())) { return false; } if (genTypeSize(m_testInfo1.compTree->TypeGet()) != genTypeSize(m_testInfo2.compTree->TypeGet())) { return false; } #ifdef TARGET_ARMARCH // Skip the small operand which we cannot encode. 
if (varTypeIsSmall(m_c1->TypeGet())) return false; #endif // The second condition must not contain side effects if (m_c2->gtFlags & GTF_GLOB_EFFECT) { return false; } // The second condition must not be too expensive m_comp->gtPrepareCost(m_c2); if (m_c2->GetCostEx() > 12) { return false; } return true; } //----------------------------------------------------------------------------- // optOptimizeBoolsUpdateTrees: Fold the trees based on fold type and comparison type, // update the edges, unlink removed blocks and update loop table // void OptBoolsDsc::optOptimizeBoolsUpdateTrees() { assert(m_b1 != nullptr && m_b2 != nullptr); bool optReturnBlock = false; if (m_b3 != nullptr) { optReturnBlock = true; } assert(m_foldOp != NULL && m_foldType != NULL && m_c1 != nullptr && m_c2 != nullptr); GenTree* cmpOp1 = m_comp->gtNewOperNode(m_foldOp, m_foldType, m_c1, m_c2); if (m_testInfo1.isBool && m_testInfo2.isBool) { // When we 'OR'/'AND' two booleans, the result is boolean as well cmpOp1->gtFlags |= GTF_BOOLEAN; } GenTree* t1Comp = m_testInfo1.compTree; t1Comp->SetOper(m_cmpOp); t1Comp->AsOp()->gtOp1 = cmpOp1; t1Comp->AsOp()->gtOp2->gtType = m_foldType; // Could have been varTypeIsGC() if (optReturnBlock) { // Update tree when m_b1 is BBJ_COND and m_b2 and m_b3 are GT_RETURN (BBJ_RETURN) t1Comp->AsOp()->gtOp2->AsIntCon()->gtIconVal = 0; m_testInfo1.testTree->gtOper = GT_RETURN; m_testInfo1.testTree->gtType = m_testInfo2.testTree->gtType; // Update the return count of flow graph assert(m_comp->fgReturnCount >= 2); --m_comp->fgReturnCount; } #if FEATURE_SET_FLAGS // For comparisons against zero we will have the GTF_SET_FLAGS set // and this can cause an assert to fire in fgMoveOpsLeft(GenTree* tree) // during the CSE phase. // // So make sure to clear any GTF_SET_FLAGS bit on these operations // as they are no longer feeding directly into a comparisons against zero // Make sure that the GTF_SET_FLAGS bit is cleared. // Fix 388436 ARM JitStress WP7 m_c1->gtFlags &= ~GTF_SET_FLAGS; m_c2->gtFlags &= ~GTF_SET_FLAGS; // The new top level node that we just created does feed directly into // a comparison against zero, so set the GTF_SET_FLAGS bit so that // we generate an instruction that sets the flags, which allows us // to omit the cmp with zero instruction. // Request that the codegen for cmpOp1 sets the condition flags // when it generates the code for cmpOp1. 
// cmpOp1->gtRequestSetFlags(); #endif if (!optReturnBlock) { // Update edges if m_b1: BBJ_COND and m_b2: BBJ_COND flowList* edge1 = m_comp->fgGetPredForBlock(m_b1->bbJumpDest, m_b1); flowList* edge2; if (m_sameTarget) { edge2 = m_comp->fgGetPredForBlock(m_b2->bbJumpDest, m_b2); } else { edge2 = m_comp->fgGetPredForBlock(m_b2->bbNext, m_b2); m_comp->fgRemoveRefPred(m_b1->bbJumpDest, m_b1); m_b1->bbJumpDest = m_b2->bbJumpDest; m_comp->fgAddRefPred(m_b2->bbJumpDest, m_b1); } assert(edge1 != nullptr); assert(edge2 != nullptr); weight_t edgeSumMin = edge1->edgeWeightMin() + edge2->edgeWeightMin(); weight_t edgeSumMax = edge1->edgeWeightMax() + edge2->edgeWeightMax(); if ((edgeSumMax >= edge1->edgeWeightMax()) && (edgeSumMax >= edge2->edgeWeightMax())) { edge1->setEdgeWeights(edgeSumMin, edgeSumMax, m_b1->bbJumpDest); } else { edge1->setEdgeWeights(BB_ZERO_WEIGHT, BB_MAX_WEIGHT, m_b1->bbJumpDest); } } /* Modify the target of the conditional jump and update bbRefs and bbPreds */ if (optReturnBlock) { m_b1->bbJumpDest = nullptr; m_b1->bbJumpKind = BBJ_RETURN; #ifdef DEBUG m_b1->bbJumpSwt = m_b2->bbJumpSwt; #endif assert(m_b2->bbJumpKind == BBJ_RETURN); assert(m_b1->bbNext == m_b2); assert(m_b3 != nullptr); } else { assert(m_b1->bbJumpKind == BBJ_COND); assert(m_b2->bbJumpKind == BBJ_COND); assert(m_b1->bbJumpDest == m_b2->bbJumpDest); assert(m_b1->bbNext == m_b2); assert(m_b2->bbNext != nullptr); } if (!optReturnBlock) { // Update bbRefs and bbPreds // // Replace pred 'm_b2' for 'm_b2->bbNext' with 'm_b1' // Remove pred 'm_b2' for 'm_b2->bbJumpDest' m_comp->fgReplacePred(m_b2->bbNext, m_b2, m_b1); m_comp->fgRemoveRefPred(m_b2->bbJumpDest, m_b2); } // Get rid of the second block m_comp->fgUnlinkBlock(m_b2); m_b2->bbFlags |= BBF_REMOVED; // If m_b2 was the last block of a try or handler, update the EH table. m_comp->ehUpdateForDeletedBlock(m_b2); if (optReturnBlock) { // Get rid of the third block m_comp->fgUnlinkBlock(m_b3); m_b3->bbFlags |= BBF_REMOVED; // If m_b3 was the last block of a try or handler, update the EH table. m_comp->ehUpdateForDeletedBlock(m_b3); } // Update loop table m_comp->fgUpdateLoopsAfterCompacting(m_b1, m_b2); if (optReturnBlock) { m_comp->fgUpdateLoopsAfterCompacting(m_b1, m_b3); } } //----------------------------------------------------------------------------- // optOptimizeBoolsReturnBlock: Optimize boolean when m_b1 is BBJ_COND and m_b2 and m_b3 are BBJ_RETURN // // Arguments: // b3: Pointer to basic block b3 // // Returns: // true if boolean optimization is done and m_b1, m_b2 and m_b3 are folded into m_b1, else false. // // Notes: // m_b1, m_b2 and m_b3 of OptBoolsDsc are set on entry. 
// // if B1.bbJumpDest == b3, it transforms // B1 : brtrue(t1, B3) // B2 : ret(t2) // B3 : ret(0) // to // B1 : ret((!t1) && t2) // // For example, (x==0 && y==0) generates: // B1: GT_JTRUE (BBJ_COND), jumps to B3 // B2: GT_RETURN (BBJ_RETURN) // B3: GT_RETURN (BBJ_RETURN), // and it is folded into // B1: GT_RETURN (BBJ_RETURN) // bool OptBoolsDsc::optOptimizeBoolsReturnBlock(BasicBlock* b3) { assert(m_b1 != nullptr && m_b2 != nullptr); // m_b3 is set for cond/return/return case m_b3 = b3; m_sameTarget = false; Statement* const s1 = optOptimizeBoolsChkBlkCond(); if (s1 == nullptr) { return false; } // Find the branch conditions of m_b1 and m_b2 m_c1 = optIsBoolComp(&m_testInfo1); if (m_c1 == nullptr) { return false; } m_c2 = optIsBoolComp(&m_testInfo2); if (m_c2 == nullptr) { return false; } // Find the type and cost conditions of m_testInfo1 and m_testInfo2 if (!optOptimizeBoolsChkTypeCostCond()) { return false; } // Get the fold operator (m_foldOp, e.g., GT_OR/GT_AND) and // the comparison operator (m_cmpOp, e.g., GT_EQ/GT_NE) var_types foldType = m_c1->TypeGet(); if (varTypeIsGC(foldType)) { foldType = TYP_I_IMPL; } m_foldType = foldType; m_foldOp = GT_NONE; m_cmpOp = GT_NONE; genTreeOps foldOp; genTreeOps cmpOp; ssize_t it1val = m_testInfo1.compTree->AsOp()->gtOp2->AsIntCon()->gtIconVal; ssize_t it2val = m_testInfo2.compTree->AsOp()->gtOp2->AsIntCon()->gtIconVal; ssize_t it3val = m_t3->AsOp()->gtOp1->AsIntCon()->gtIconVal; if ((m_testInfo1.compTree->gtOper == GT_NE && m_testInfo2.compTree->gtOper == GT_EQ) && (it1val == 0 && it2val == 0 && it3val == 0)) { // Case: x == 0 && y == 0 // t1:c1!=0 t2:c2==0 t3:c3==0 // ==> true if (c1|c2)==0 foldOp = GT_OR; cmpOp = GT_EQ; } else if ((m_testInfo1.compTree->gtOper == GT_EQ && m_testInfo2.compTree->gtOper == GT_NE) && (it1val == 0 && it2val == 0 && it3val == 0)) { // Case: x == 1 && y ==1 // t1:c1!=1 t2:c2==1 t3:c3==0 is reversed from optIsBoolComp() to: t1:c1==0 t2:c2!=0 t3:c3==0 // ==> true if (c1&c2)!=0 foldOp = GT_AND; cmpOp = GT_NE; } else if ((m_testInfo1.compTree->gtOper == GT_EQ && m_testInfo2.compTree->gtOper == GT_EQ) && (it1val == 0 && it2val == 0 && it3val == 1)) { // Case: x == 0 || y == 0 // t1:c1==0 t2:c2==0 t3:c3==1 // ==> true if (c1&c2)==0 foldOp = GT_AND; cmpOp = GT_EQ; } else if ((m_testInfo1.compTree->gtOper == GT_NE && m_testInfo2.compTree->gtOper == GT_NE) && (it1val == 0 && it2val == 0 && it3val == 1)) { // Case: x == 1 || y == 1 // t1:c1==1 t2:c2==1 t3:c3==1 is reversed from optIsBoolComp() to: t1:c1!=0 t2:c2!=0 t3:c3==1 // ==> true if (c1|c2)!=0 foldOp = GT_OR; cmpOp = GT_NE; } else { // Require NOT operation for operand(s). Do Not fold. return false; } if ((foldOp == GT_AND || cmpOp == GT_NE) && (!m_testInfo1.isBool || !m_testInfo2.isBool)) { // x == 1 && y == 1: Skip cases where x or y is greather than 1, e.g., x=3, y=1 // x == 0 || y == 0: Skip cases where x and y have opposite bits set, e.g., x=2, y=1 // x == 1 || y == 1: Skip cases where either x or y is greater than 1, e.g., x=2, y=0 return false; } m_foldOp = foldOp; m_cmpOp = cmpOp; // Now update the trees optOptimizeBoolsUpdateTrees(); #ifdef DEBUG if (m_comp->verbose) { printf("Folded %sboolean conditions of " FMT_BB ", " FMT_BB " and " FMT_BB " to :\n", m_c2->OperIsLeaf() ? 
"" : "non-leaf ", m_b1->bbNum, m_b2->bbNum, m_b3->bbNum); m_comp->gtDispStmt(s1); printf("\n"); } #endif // Return true to continue the bool optimization for the rest of the BB chain return true; } //----------------------------------------------------------------------------- // optOptimizeBoolsGcStress: Replace x==null with (x|x)==0 if x is a GC-type. // This will stress code-gen and the emitter to make sure they support such trees. // #ifdef DEBUG void OptBoolsDsc::optOptimizeBoolsGcStress() { if (!m_comp->compStressCompile(m_comp->STRESS_OPT_BOOLS_GC, 20)) { return; } assert(m_b1->bbJumpKind == BBJ_COND); GenTree* cond = m_b1->lastStmt()->GetRootNode(); assert(cond->gtOper == GT_JTRUE); OptTestInfo test; test.testTree = cond; GenTree* comparand = optIsBoolComp(&test); if (comparand == nullptr || !varTypeIsGC(comparand->TypeGet())) { return; } GenTree* relop = test.compTree; bool isBool = test.isBool; if (comparand->gtFlags & (GTF_ASG | GTF_CALL | GTF_ORDER_SIDEEFF)) { return; } GenTree* comparandClone = m_comp->gtCloneExpr(comparand); noway_assert(relop->AsOp()->gtOp1 == comparand); genTreeOps oper = m_comp->compStressCompile(m_comp->STRESS_OPT_BOOLS_GC, 50) ? GT_OR : GT_AND; relop->AsOp()->gtOp1 = m_comp->gtNewOperNode(oper, TYP_I_IMPL, comparand, comparandClone); // Comparand type is already checked, and we have const int, there is no harm // morphing it into a TYP_I_IMPL. noway_assert(relop->AsOp()->gtOp2->gtOper == GT_CNS_INT); relop->AsOp()->gtOp2->gtType = TYP_I_IMPL; } #endif //----------------------------------------------------------------------------- // optIsBoolComp: Function used by folding of boolean conditionals // // Arguments: // pOptTest The test info for the test tree // // Return: // On success, return the first operand (gtOp1) of compTree, else return nullptr. // // Notes: // On entry, testTree is set. // On success, compTree is set to the compare node (i.e. GT_EQ or GT_NE) of the testTree. // isBool is set to true if the comparand (i.e., operand 1 of compTree is boolean. Otherwise, false. // // Given a GT_JTRUE or GT_RETURN node, this method checks if it is a boolean comparison // of the form "if (boolVal ==/!= 0/1)".This is translated into // a GT_EQ/GT_NE node with "opr1" being a boolean lclVar and "opr2" the const 0/1. // // When isBool == true, if the comparison was against a 1 (i.e true) // then we morph the tree by reversing the GT_EQ/GT_NE and change the 1 to 0. // GenTree* OptBoolsDsc::optIsBoolComp(OptTestInfo* pOptTest) { pOptTest->isBool = false; assert(pOptTest->testTree->gtOper == GT_JTRUE || pOptTest->testTree->gtOper == GT_RETURN); GenTree* cond = pOptTest->testTree->AsOp()->gtOp1; // The condition must be "!= 0" or "== 0" if ((cond->gtOper != GT_EQ) && (cond->gtOper != GT_NE)) { return nullptr; } // Return the compare node to the caller pOptTest->compTree = cond; // Get hold of the comparands GenTree* opr1 = cond->AsOp()->gtOp1; GenTree* opr2 = cond->AsOp()->gtOp2; if (opr2->gtOper != GT_CNS_INT) { return nullptr; } if (!opr2->IsIntegralConst(0) && !opr2->IsIntegralConst(1)) { return nullptr; } ssize_t ival2 = opr2->AsIntCon()->gtIconVal; // Is the value a boolean? 
// We can either have a boolean expression (marked GTF_BOOLEAN) or // a local variable that is marked as being boolean (lvIsBoolean) if (opr1->gtFlags & GTF_BOOLEAN) { pOptTest->isBool = true; } else if ((opr1->gtOper == GT_CNS_INT) && (opr1->IsIntegralConst(0) || opr1->IsIntegralConst(1))) { pOptTest->isBool = true; } else if (opr1->gtOper == GT_LCL_VAR) { // is it a boolean local variable? unsigned lclNum = opr1->AsLclVarCommon()->GetLclNum(); noway_assert(lclNum < m_comp->lvaCount); if (m_comp->lvaTable[lclNum].lvIsBoolean) { pOptTest->isBool = true; } } // Was our comparison against the constant 1 (i.e. true) if (ival2 == 1) { // If this is a boolean expression tree we can reverse the relop // and change the true to false. if (pOptTest->isBool) { m_comp->gtReverseCond(cond); opr2->AsIntCon()->gtIconVal = 0; } else { return nullptr; } } return opr1; } //----------------------------------------------------------------------------- // optOptimizeBools: Folds boolean conditionals for GT_JTRUE/GT_RETURN nodes // // Notes: // If the operand of GT_JTRUE/GT_RETURN node is GT_EQ/GT_NE of the form // "if (boolVal ==/!= 0/1)", the GT_EQ/GT_NE nodes are translated into a // GT_EQ/GT_NE node with // "op1" being a boolean GT_OR/GT_AND lclVar and // "op2" the const 0/1. // For example, the folded tree for the below boolean optimization is shown below: // Case 1: (x == 0 && y ==0) => (x | y) == 0 // * RETURN int // \--* EQ int // +--* OR int // | +--* LCL_VAR int V00 arg0 // | \--* LCL_VAR int V01 arg1 // \--* CNS_INT int 0 // // Case 2: (x == null && y == null) ==> (x | y) == 0 // * RETURN int // \-- * EQ int // + -- * OR long // | +-- * LCL_VAR ref V00 arg0 // | \-- * LCL_VAR ref V01 arg1 // \-- * CNS_INT long 0 // // Case 3: (x == 0 && y == 0 && z == 0) ==> ((x | y) | z) == 0 // * RETURN int // \-- * EQ int // + -- * OR int // | +-- * OR int // | | +-- * LCL_VAR int V00 arg0 // | | \-- * LCL_VAR int V01 arg1 // | \-- * LCL_VAR int V02 arg2 // \-- * CNS_INT int 0 // // Case 4: (x == 0 && y == 0 && z == 0 && w == 0) ==> (((x | y) | z) | w) == 0 // * RETURN int // \-- * EQ int // + * OR int // | +--* OR int // | | +--* OR int // | | | +--* LCL_VAR int V00 arg0 // | | | \--* LCL_VAR int V01 arg1 // | | \--* LCL_VAR int V02 arg2 // | \--* LCL_VAR int V03 arg3 // \--* CNS_INT int 0 // // Patterns that are not optimized include (x == 1 && y == 1), (x == 1 || y == 1), // (x == 0 || y == 0) because currently their comptree is not marked as boolean expression. 
// When m_foldOp == GT_AND or m_cmpOp == GT_NE, both compTrees must be boolean expressions // in order to skip the cases below when compTree is not a boolean expression: // - x == 1 && y == 1 ==> (x&y)!=0: Skip cases where x or y is greater than 1, e.g., x=3, y=1 // - x == 1 || y == 1 ==> (x|y)!=0: Skip cases where either x or y is greater than 1, e.g., x=2, y=0 // - x == 0 || y == 0 ==> (x&y)==0: Skip cases where x and y have opposite bits set, e.g., x=2, y=1 // void Compiler::optOptimizeBools() { #ifdef DEBUG if (verbose) { printf("*************** In optOptimizeBools()\n"); if (verboseTrees) { printf("Blocks/Trees before phase\n"); fgDispBasicBlocks(true); } } #endif bool change; do { change = false; for (BasicBlock* const b1 : Blocks()) { // We're only interested in conditional jumps here if (b1->bbJumpKind != BBJ_COND) { continue; } // If there is no next block, we're done BasicBlock* b2 = b1->bbNext; if (b2 == nullptr) { break; } // The next block must not be marked as BBF_DONT_REMOVE if (b2->bbFlags & BBF_DONT_REMOVE) { continue; } OptBoolsDsc optBoolsDsc(b1, b2, this); // The next block needs to be a condition or return block. if (b2->bbJumpKind == BBJ_COND) { if ((b1->bbJumpDest != b2->bbJumpDest) && (b1->bbJumpDest != b2->bbNext)) { continue; } // When both blocks are conditional jumps if (optBoolsDsc.optOptimizeBoolsCondBlock()) { change = true; } } else if (b2->bbJumpKind == BBJ_RETURN) { // Set b3 to b1 jump destination BasicBlock* b3 = b1->bbJumpDest; // b3 must not be marked as BBF_DONT_REMOVE if (b3->bbFlags & BBF_DONT_REMOVE) { continue; } // b3 must be RETURN type if (b3->bbJumpKind != BBJ_RETURN) { continue; } if (optBoolsDsc.optOptimizeBoolsReturnBlock(b3)) { change = true; } } else { #ifdef DEBUG optBoolsDsc.optOptimizeBoolsGcStress(); #endif } } } while (change); #ifdef DEBUG fgDebugCheckBBlist(); #endif } typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, unsigned> LclVarRefCounts; //------------------------------------------------------------------------------------------ // optRemoveRedundantZeroInits: Remove redundant zero initializations. // // Notes: // This phase iterates over basic blocks starting with the first basic block until there is no unique // basic block successor or until it detects a loop. It keeps track of local nodes it encounters. // When it gets to an assignment to a local variable or a local field, it checks whether the assignment // is the first reference to the local (or to the parent of the local field), and, if so, // it may do one of two optimizations: // 1. If the following conditions are true: // the local is untracked, // the rhs of the assignment is 0, // the local is guaranteed to be fully initialized in the prolog, // then the explicit zero initialization is removed. // 2. If the following conditions are true: // the assignment is to a local (and not a field), // the local is not lvLiveInOutOfHndlr or no exceptions can be thrown between the prolog and the assignment, // either the local has no gc pointers or there are no gc-safe points between the prolog and the assignment, // then the local is marked with lvHasExplicitInit which tells the codegen not to insert zero initialization // for this local in the prolog.
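// (Editorial illustration, not from the original sources.) For example, if the first
// reference to local V02 is an assignment "V02 = 0" and the prolog is already guaranteed
// to zero-initialize V02, case 1 removes the explicit assignment; if the first reference
// is instead a full definition such as "V02 = V01", case 2 marks V02 with lvHasExplicitInit
// so the prolog does not need to zero-initialize it.
//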
void Compiler::optRemoveRedundantZeroInits() { #ifdef DEBUG if (verbose) { printf("*************** In optRemoveRedundantZeroInits()\n"); } #endif // DEBUG CompAllocator allocator(getAllocator(CMK_ZeroInit)); LclVarRefCounts refCounts(allocator); BitVecTraits bitVecTraits(lvaCount, this); BitVec zeroInitLocals = BitVecOps::MakeEmpty(&bitVecTraits); bool hasGCSafePoint = false; bool canThrow = false; assert(fgStmtListThreaded); for (BasicBlock* block = fgFirstBB; (block != nullptr) && ((block->bbFlags & BBF_MARKED) == 0); block = block->GetUniqueSucc()) { block->bbFlags |= BBF_MARKED; CompAllocator allocator(getAllocator(CMK_ZeroInit)); LclVarRefCounts defsInBlock(allocator); bool removedTrackedDefs = false; for (Statement* stmt = block->FirstNonPhiDef(); stmt != nullptr;) { Statement* next = stmt->GetNextStmt(); for (GenTree* const tree : stmt->TreeList()) { if (((tree->gtFlags & GTF_CALL) != 0)) { hasGCSafePoint = true; } if ((tree->gtFlags & GTF_EXCEPT) != 0) { canThrow = true; } switch (tree->gtOper) { case GT_LCL_VAR: case GT_LCL_FLD: case GT_LCL_VAR_ADDR: case GT_LCL_FLD_ADDR: { unsigned lclNum = tree->AsLclVarCommon()->GetLclNum(); unsigned* pRefCount = refCounts.LookupPointer(lclNum); if (pRefCount != nullptr) { *pRefCount = (*pRefCount) + 1; } else { refCounts.Set(lclNum, 1); } if ((tree->gtFlags & GTF_VAR_DEF) == 0) { break; } // We need to count the number of tracked var defs in the block // so that we can update block->bbVarDef if we remove any tracked var defs. LclVarDsc* const lclDsc = lvaGetDesc(lclNum); if (lclDsc->lvTracked) { unsigned* pDefsCount = defsInBlock.LookupPointer(lclNum); if (pDefsCount != nullptr) { *pDefsCount = (*pDefsCount) + 1; } else { defsInBlock.Set(lclNum, 1); } } else if (varTypeIsStruct(lclDsc) && ((tree->gtFlags & GTF_VAR_USEASG) == 0) && lvaGetPromotionType(lclDsc) != PROMOTION_TYPE_NONE) { for (unsigned i = lclDsc->lvFieldLclStart; i < lclDsc->lvFieldLclStart + lclDsc->lvFieldCnt; ++i) { if (lvaGetDesc(i)->lvTracked) { unsigned* pDefsCount = defsInBlock.LookupPointer(i); if (pDefsCount != nullptr) { *pDefsCount = (*pDefsCount) + 1; } else { defsInBlock.Set(i, 1); } } } } break; } // case GT_CALL: // TODO-CQ: Need to remove redundant zero-inits for "return buffer". // assert(!"Need to handle zero inits.\n"); // break; case GT_ASG: { GenTreeOp* treeOp = tree->AsOp(); GenTreeLclVarCommon* lclVar; bool isEntire; if (!tree->DefinesLocal(this, &lclVar, &isEntire)) { break; } const unsigned lclNum = lclVar->GetLclNum(); LclVarDsc* const lclDsc = lvaGetDesc(lclNum); unsigned* pRefCount = refCounts.LookupPointer(lclNum); // pRefCount can't be null because the local node on the lhs of the assignment // must have already been seen. assert(pRefCount != nullptr); if (*pRefCount != 1) { break; } unsigned parentRefCount = 0; if (lclDsc->lvIsStructField && refCounts.Lookup(lclDsc->lvParentLcl, &parentRefCount) && (parentRefCount != 0)) { break; } unsigned fieldRefCount = 0; if (lclDsc->lvPromoted) { for (unsigned i = lclDsc->lvFieldLclStart; (fieldRefCount == 0) && (i < lclDsc->lvFieldLclStart + lclDsc->lvFieldCnt); ++i) { refCounts.Lookup(i, &fieldRefCount); } } if (fieldRefCount != 0) { break; } // The local hasn't been referenced before this assignment. 
bool removedExplicitZeroInit = false; if (treeOp->gtGetOp2()->IsIntegralConst(0)) { bool bbInALoop = (block->bbFlags & BBF_BACKWARD_JUMP) != 0; bool bbIsReturn = block->bbJumpKind == BBJ_RETURN; if (!bbInALoop || bbIsReturn) { if (BitVecOps::IsMember(&bitVecTraits, zeroInitLocals, lclNum) || (lclDsc->lvIsStructField && BitVecOps::IsMember(&bitVecTraits, zeroInitLocals, lclDsc->lvParentLcl)) || ((!lclDsc->lvTracked || !isEntire) && !fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn))) { // We are guaranteed to have a zero initialization in the prolog or a // dominating explicit zero initialization and the local hasn't been redefined // between the prolog and this explicit zero initialization so the assignment // can be safely removed. if (tree == stmt->GetRootNode()) { fgRemoveStmt(block, stmt); removedExplicitZeroInit = true; lclDsc->lvSuppressedZeroInit = 1; if (lclDsc->lvTracked) { removedTrackedDefs = true; unsigned* pDefsCount = defsInBlock.LookupPointer(lclNum); *pDefsCount = (*pDefsCount) - 1; } } } if (isEntire) { BitVecOps::AddElemD(&bitVecTraits, zeroInitLocals, lclNum); } *pRefCount = 0; } } if (!removedExplicitZeroInit && isEntire && (!canThrow || !lclDsc->lvLiveInOutOfHndlr)) { // If compMethodRequiresPInvokeFrame() returns true, lower may later // insert a call to CORINFO_HELP_INIT_PINVOKE_FRAME which is a gc-safe point. if (!lclDsc->HasGCPtr() || (!GetInterruptible() && !hasGCSafePoint && !compMethodRequiresPInvokeFrame())) { // The local hasn't been used and won't be reported to the gc between // the prolog and this explicit intialization. Therefore, it doesn't // require zero initialization in the prolog. lclDsc->lvHasExplicitInit = 1; JITDUMP("Marking " FMT_LP " as having an explicit init\n", lclNum); } } break; } default: break; } } stmt = next; } if (removedTrackedDefs) { LclVarRefCounts::KeyIterator iter(defsInBlock.Begin()); LclVarRefCounts::KeyIterator end(defsInBlock.End()); for (; !iter.Equal(end); iter++) { unsigned int lclNum = iter.Get(); if (defsInBlock[lclNum] == 0) { VarSetOps::RemoveElemD(this, block->bbVarDef, lvaGetDesc(lclNum)->lvVarIndex); } } } } for (BasicBlock* block = fgFirstBB; (block != nullptr) && ((block->bbFlags & BBF_MARKED) != 0); block = block->GetUniqueSucc()) { block->bbFlags &= ~BBF_MARKED; } } #ifdef DEBUG //------------------------------------------------------------------------ // optAnyChildNotRemoved: Recursively check the child loops of a loop to see if any of them // are still live (that is, not marked as LPFLG_REMOVED). This check is done when we are // removing a parent, just to notify that there is something odd about leaving a live child. // // Arguments: // loopNum - the loop number to check // bool Compiler::optAnyChildNotRemoved(unsigned loopNum) { assert(loopNum < optLoopCount); // Now recursively mark the children. for (BasicBlock::loopNumber l = optLoopTable[loopNum].lpChild; // l != BasicBlock::NOT_IN_LOOP; // l = optLoopTable[l].lpSibling) { if ((optLoopTable[l].lpFlags & LPFLG_REMOVED) == 0) { return true; } if (optAnyChildNotRemoved(l)) { return true; } } // All children were removed return false; } #endif // DEBUG //------------------------------------------------------------------------ // optMarkLoopRemoved: Mark the specified loop as removed (some optimization, such as unrolling, has made the // loop no longer exist). Note that only the given loop is marked as being removed; if it has any children, // they are not touched (but a warning message is output to the JitDump). 
// // Arguments: // loopNum - the loop number to remove // void Compiler::optMarkLoopRemoved(unsigned loopNum) { JITDUMP("Marking loop " FMT_LP " removed\n", loopNum); assert(loopNum < optLoopCount); LoopDsc& loop = optLoopTable[loopNum]; loop.lpFlags |= LPFLG_REMOVED; #ifdef DEBUG if (optAnyChildNotRemoved(loopNum)) { JITDUMP("Removed loop " FMT_LP " has one or more live children\n", loopNum); } // Note: we can't call `fgDebugCheckLoopTable()` here because if there are live children, it will assert. // Assume the caller is going to fix up the table and `bbNatLoopNum` block annotations before the next time // `fgDebugCheckLoopTable()` is called. #endif // DEBUG }
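As a source-level recap of the boolean folding documented earlier in this file excerpt (a hedged, editorial sketch: the function names below are made up and the snippet is not taken from the JIT sources or its tests), the cond/return/return case collapses a short-circuit comparison into a single bitwise test:

// Before optOptimizeBoolsReturnBlock: B1 (GT_JTRUE), B2 (GT_RETURN), B3 (GT_RETURN 0)
bool BothZero(int x, int y)
{
    return (x == 0) && (y == 0);
}

// After folding, the JIT emits the equivalent of a single return block: true iff (x | y) == 0
bool BothZeroFolded(int x, int y)
{
    return (x | y) == 0;
}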
1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/coreclr/pal/prebuilt/idl/corpub_i.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /* this ALWAYS GENERATED file contains the IIDs and CLSIDs */ /* link this file in with the server and any clients */ /* File created by MIDL compiler version 8.00.0603 */ /* @@MIDL_FILE_HEADING( ) */ #pragma warning( disable: 4049 ) /* more than 64k source lines */ #ifdef __cplusplus extern "C"{ #endif #include <rpc.h> #include <rpcndr.h> #ifdef _MIDL_USE_GUIDDEF_ #ifndef INITGUID #define INITGUID #include <guiddef.h> #undef INITGUID #else #include <guiddef.h> #endif #define MIDL_DEFINE_GUID(type,name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) \ DEFINE_GUID(name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) #else // !_MIDL_USE_GUIDDEF_ #ifndef __IID_DEFINED__ #define __IID_DEFINED__ typedef struct _IID { unsigned long x; unsigned short s1; unsigned short s2; unsigned char c[8]; } IID; #endif // __IID_DEFINED__ #ifndef CLSID_DEFINED #define CLSID_DEFINED typedef IID CLSID; #endif // CLSID_DEFINED #define MIDL_DEFINE_GUID(type,name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) \ EXTERN_C __declspec(selectany) const type name = {l,w1,w2,{b1,b2,b3,b4,b5,b6,b7,b8}} #endif // !_MIDL_USE_GUIDDEF_ MIDL_DEFINE_GUID(IID, LIBID_CorpubProcessLib,0xe97ca460,0x657d,0x11d3,0x8d,0x5b,0x00,0x10,0x4b,0x35,0xe7,0xef); MIDL_DEFINE_GUID(CLSID, CLSID_CorpubPublish,0x047a9a40,0x657e,0x11d3,0x8d,0x5b,0x00,0x10,0x4b,0x35,0xe7,0xef); MIDL_DEFINE_GUID(IID, IID_ICorPublish,0x9613A0E7,0x5A68,0x11d3,0x8F,0x84,0x00,0xA0,0xC9,0xB4,0xD5,0x0C); MIDL_DEFINE_GUID(IID, IID_ICorPublishEnum,0xC0B22967,0x5A69,0x11d3,0x8F,0x84,0x00,0xA0,0xC9,0xB4,0xD5,0x0C); MIDL_DEFINE_GUID(IID, IID_ICorPublishProcess,0x18D87AF1,0x5A6A,0x11d3,0x8F,0x84,0x00,0xA0,0xC9,0xB4,0xD5,0x0C); MIDL_DEFINE_GUID(IID, IID_ICorPublishAppDomain,0xD6315C8F,0x5A6A,0x11d3,0x8F,0x84,0x00,0xA0,0xC9,0xB4,0xD5,0x0C); MIDL_DEFINE_GUID(IID, IID_ICorPublishProcessEnum,0xA37FBD41,0x5A69,0x11d3,0x8F,0x84,0x00,0xA0,0xC9,0xB4,0xD5,0x0C); MIDL_DEFINE_GUID(IID, IID_ICorPublishAppDomainEnum,0x9F0C98F5,0x5A6A,0x11d3,0x8F,0x84,0x00,0xA0,0xC9,0xB4,0xD5,0x0C); #undef MIDL_DEFINE_GUID #ifdef __cplusplus } #endif
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /* this ALWAYS GENERATED file contains the IIDs and CLSIDs */ /* link this file in with the server and any clients */ /* File created by MIDL compiler version 8.00.0603 */ /* @@MIDL_FILE_HEADING( ) */ #pragma warning( disable: 4049 ) /* more than 64k source lines */ #ifdef __cplusplus extern "C"{ #endif #include <rpc.h> #include <rpcndr.h> #ifdef _MIDL_USE_GUIDDEF_ #ifndef INITGUID #define INITGUID #include <guiddef.h> #undef INITGUID #else #include <guiddef.h> #endif #define MIDL_DEFINE_GUID(type,name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) \ DEFINE_GUID(name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) #else // !_MIDL_USE_GUIDDEF_ #ifndef __IID_DEFINED__ #define __IID_DEFINED__ typedef struct _IID { unsigned long x; unsigned short s1; unsigned short s2; unsigned char c[8]; } IID; #endif // __IID_DEFINED__ #ifndef CLSID_DEFINED #define CLSID_DEFINED typedef IID CLSID; #endif // CLSID_DEFINED #define MIDL_DEFINE_GUID(type,name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) \ EXTERN_C __declspec(selectany) const type name = {l,w1,w2,{b1,b2,b3,b4,b5,b6,b7,b8}} #endif // !_MIDL_USE_GUIDDEF_ MIDL_DEFINE_GUID(IID, LIBID_CorpubProcessLib,0xe97ca460,0x657d,0x11d3,0x8d,0x5b,0x00,0x10,0x4b,0x35,0xe7,0xef); MIDL_DEFINE_GUID(CLSID, CLSID_CorpubPublish,0x047a9a40,0x657e,0x11d3,0x8d,0x5b,0x00,0x10,0x4b,0x35,0xe7,0xef); MIDL_DEFINE_GUID(IID, IID_ICorPublish,0x9613A0E7,0x5A68,0x11d3,0x8F,0x84,0x00,0xA0,0xC9,0xB4,0xD5,0x0C); MIDL_DEFINE_GUID(IID, IID_ICorPublishEnum,0xC0B22967,0x5A69,0x11d3,0x8F,0x84,0x00,0xA0,0xC9,0xB4,0xD5,0x0C); MIDL_DEFINE_GUID(IID, IID_ICorPublishProcess,0x18D87AF1,0x5A6A,0x11d3,0x8F,0x84,0x00,0xA0,0xC9,0xB4,0xD5,0x0C); MIDL_DEFINE_GUID(IID, IID_ICorPublishAppDomain,0xD6315C8F,0x5A6A,0x11d3,0x8F,0x84,0x00,0xA0,0xC9,0xB4,0xD5,0x0C); MIDL_DEFINE_GUID(IID, IID_ICorPublishProcessEnum,0xA37FBD41,0x5A69,0x11d3,0x8F,0x84,0x00,0xA0,0xC9,0xB4,0xD5,0x0C); MIDL_DEFINE_GUID(IID, IID_ICorPublishAppDomainEnum,0x9F0C98F5,0x5A6A,0x11d3,0x8F,0x84,0x00,0xA0,0xC9,0xB4,0xD5,0x0C); #undef MIDL_DEFINE_GUID #ifdef __cplusplus } #endif
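For reference, a hand-expanded sketch of one of the MIDL_DEFINE_GUID invocations above, using the non-_MIDL_USE_GUIDDEF_ definition of the macro shown in the same file (the expansion is derived by hand from that macro definition, not copied from MIDL output):

// MIDL_DEFINE_GUID(IID, IID_ICorPublish, 0x9613A0E7, 0x5A68, 0x11d3, 0x8F, 0x84, 0x00, 0xA0, 0xC9, 0xB4, 0xD5, 0x0C);
// expands to:
EXTERN_C __declspec(selectany) const IID IID_ICorPublish =
    {0x9613A0E7, 0x5A68, 0x11d3, {0x8F, 0x84, 0x00, 0xA0, 0xC9, 0xB4, 0xD5, 0x0C}};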
-1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/native/external/brotli/enc/compress_fragment_two_pass.h
/* Copyright 2015 Google Inc. All Rights Reserved. Distributed under MIT license. See file LICENSE for detail or copy at https://opensource.org/licenses/MIT */ /* Function for fast encoding of an input fragment, independently from the input history. This function uses two-pass processing: in the first pass we save the found backward matches and literal bytes into a buffer, and in the second pass we emit them into the bit stream using prefix codes built based on the actual command and literal byte histograms. */ #ifndef BROTLI_ENC_COMPRESS_FRAGMENT_TWO_PASS_H_ #define BROTLI_ENC_COMPRESS_FRAGMENT_TWO_PASS_H_ #include "../common/platform.h" #include <brotli/types.h> #include "./memory.h" #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static const size_t kCompressFragmentTwoPassBlockSize = 1 << 17; /* Compresses "input" string to the "*storage" buffer as one or more complete meta-blocks, and updates the "*storage_ix" bit position. If "is_last" is 1, emits an additional empty last meta-block. REQUIRES: "input_size" is greater than zero, or "is_last" is 1. REQUIRES: "input_size" is less or equal to maximal metablock size (1 << 24). REQUIRES: "command_buf" and "literal_buf" point to at least kCompressFragmentTwoPassBlockSize long arrays. REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero. REQUIRES: "table_size" is a power of two OUTPUT: maximal copy distance <= |input_size| OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */ BROTLI_INTERNAL void BrotliCompressFragmentTwoPass(MemoryManager* m, const uint8_t* input, size_t input_size, BROTLI_BOOL is_last, uint32_t* command_buf, uint8_t* literal_buf, int* table, size_t table_size, size_t* storage_ix, uint8_t* storage); #if defined(__cplusplus) || defined(c_plusplus) } /* extern "C" */ #endif #endif /* BROTLI_ENC_COMPRESS_FRAGMENT_TWO_PASS_H_ */
/* Copyright 2015 Google Inc. All Rights Reserved. Distributed under MIT license. See file LICENSE for detail or copy at https://opensource.org/licenses/MIT */ /* Function for fast encoding of an input fragment, independently from the input history. This function uses two-pass processing: in the first pass we save the found backward matches and literal bytes into a buffer, and in the second pass we emit them into the bit stream using prefix codes built based on the actual command and literal byte histograms. */ #ifndef BROTLI_ENC_COMPRESS_FRAGMENT_TWO_PASS_H_ #define BROTLI_ENC_COMPRESS_FRAGMENT_TWO_PASS_H_ #include "../common/platform.h" #include <brotli/types.h> #include "./memory.h" #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static const size_t kCompressFragmentTwoPassBlockSize = 1 << 17; /* Compresses "input" string to the "*storage" buffer as one or more complete meta-blocks, and updates the "*storage_ix" bit position. If "is_last" is 1, emits an additional empty last meta-block. REQUIRES: "input_size" is greater than zero, or "is_last" is 1. REQUIRES: "input_size" is less or equal to maximal metablock size (1 << 24). REQUIRES: "command_buf" and "literal_buf" point to at least kCompressFragmentTwoPassBlockSize long arrays. REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero. REQUIRES: "table_size" is a power of two OUTPUT: maximal copy distance <= |input_size| OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */ BROTLI_INTERNAL void BrotliCompressFragmentTwoPass(MemoryManager* m, const uint8_t* input, size_t input_size, BROTLI_BOOL is_last, uint32_t* command_buf, uint8_t* literal_buf, int* table, size_t table_size, size_t* storage_ix, uint8_t* storage); #if defined(__cplusplus) || defined(c_plusplus) } /* extern "C" */ #endif #endif /* BROTLI_ENC_COMPRESS_FRAGMENT_TWO_PASS_H_ */
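A minimal usage sketch of the function declared above, based only on its signature and the REQUIRES comments in this header; how the MemoryManager and output storage are obtained, and the table size chosen, are assumptions rather than details taken from the brotli sources:

#include <cstddef>
#include <cstdint>
#include <vector>

// Hedged sketch: 'm' is assumed to be an already-initialized MemoryManager*, 'input' points
// to 'input_size' bytes (<= 1 << 24 per REQUIRES), and 'storage'/'storage_ix' are assumed to
// describe an output buffer large enough for the compressed fragment.
static void CompressFragmentSketch(MemoryManager* m, const uint8_t* input, size_t input_size,
                                   size_t* storage_ix, uint8_t* storage)
{
    std::vector<uint32_t> command_buf(kCompressFragmentTwoPassBlockSize);
    std::vector<uint8_t>  literal_buf(kCompressFragmentTwoPassBlockSize);
    std::vector<int>      table(1 << 14, 0); // power-of-two size, all zero, per REQUIRES
    BrotliCompressFragmentTwoPass(m, input, input_size, BROTLI_TRUE,
                                  command_buf.data(), literal_buf.data(),
                                  table.data(), table.size(), storage_ix, storage);
}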
-1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/native/public/mono/utils/mono-dl-fallback.h
/** * \file */ #ifndef __MONO_UTILS_DL_FALLBACK_H__ #define __MONO_UTILS_DL_FALLBACK_H__ #include <mono/utils/details/mono-dl-fallback-types.h> MONO_BEGIN_DECLS #define MONO_API_FUNCTION(ret,name,args) MONO_API ret name args; #include <mono/utils/details/mono-dl-fallback-functions.h> #undef MONO_API_FUNCTION MONO_END_DECLS #endif /* __MONO_UTILS_DL_FALLBACK_H__ */
/** * \file */ #ifndef __MONO_UTILS_DL_FALLBACK_H__ #define __MONO_UTILS_DL_FALLBACK_H__ #include <mono/utils/details/mono-dl-fallback-types.h> MONO_BEGIN_DECLS #define MONO_API_FUNCTION(ret,name,args) MONO_API ret name args; #include <mono/utils/details/mono-dl-fallback-functions.h> #undef MONO_API_FUNCTION MONO_END_DECLS #endif /* __MONO_UTILS_DL_FALLBACK_H__ */
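The header above uses the common "X-macro" pattern: the details header is expected to contain one MONO_API_FUNCTION entry per function, and each includer defines the macro to expand those entries as it needs. A generic sketch with a hypothetical entry (the real list lives in mono-dl-fallback-functions.h and is not reproduced here):

// Hypothetical entry in the details header (no trailing semicolon; the macro adds it):
//   MONO_API_FUNCTION(void, mono_dl_fallback_example, (int flags))
//
// With the MONO_API_FUNCTION definition above, including the details header expands it to:
//   MONO_API void mono_dl_fallback_example (int flags);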
-1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/coreclr/pal/tests/palsuite/c_runtime/strspn/test1/test1.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================ ** ** Source: test1.c ** ** Purpose: ** Check a character set against a string to see that the function returns ** the length of the substring which consists of all characters in the string. ** Also check that if the character set doesn't match the string at all, that ** the value is 0. ** ** **==========================================================================*/ #include <palsuite.h> struct testCase { long result; char *string1; char *string2; }; PALTEST(c_runtime_strspn_test1_paltest_strspn_test1, "c_runtime/strspn/test1/paltest_strspn_test1") { int i=0; long TheResult = 0; struct testCase testCases[]= { {4,"abcdefg12345678hijklmnopqrst","a2bjk341cd"}, {14,"This is a test, testing", "aeioTts rh"}, {0,"foobar","kpzt"} }; /* * Initialize the PAL */ if (0 != PAL_Initialize(argc, argv)) { return FAIL; } for (i=0; i<sizeof(testCases)/sizeof(struct testCase);i++) { TheResult = strspn(testCases[i].string1,testCases[i].string2); if (TheResult != testCases[i].result) { Fail("Expected strspn to return %d, got %d!\n", testCases[i].result,TheResult); } } PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================ ** ** Source: test1.c ** ** Purpose: ** Check a character set against a string to see that the function returns ** the length of the substring which consists of all characters in the string. ** Also check that if the character set doesn't match the string at all, that ** the value is 0. ** ** **==========================================================================*/ #include <palsuite.h> struct testCase { long result; char *string1; char *string2; }; PALTEST(c_runtime_strspn_test1_paltest_strspn_test1, "c_runtime/strspn/test1/paltest_strspn_test1") { int i=0; long TheResult = 0; struct testCase testCases[]= { {4,"abcdefg12345678hijklmnopqrst","a2bjk341cd"}, {14,"This is a test, testing", "aeioTts rh"}, {0,"foobar","kpzt"} }; /* * Initialize the PAL */ if (0 != PAL_Initialize(argc, argv)) { return FAIL; } for (i=0; i<sizeof(testCases)/sizeof(struct testCase);i++) { TheResult = strspn(testCases[i].string1,testCases[i].string2); if (TheResult != testCases[i].result) { Fail("Expected strspn to return %d, got %d!\n", testCases[i].result,TheResult); } } PAL_Terminate(); return PASS; }
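A quick worked check of two of the expectations above (editorial note, using the same inputs as the test cases):

// strspn("abcdefg12345678hijklmnopqrst", "a2bjk341cd") == 4, since the leading characters
// 'a', 'b', 'c', 'd' all appear in the set while 'e' does not.
// strspn("foobar", "kpzt") == 0, since 'f' is not in the set at all.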
-1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/coreclr/jit/optcse.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX OptCSE XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #include "jitstd/algorithm.h" #ifdef _MSC_VER #pragma hdrstop #endif /* static */ const size_t Compiler::s_optCSEhashSizeInitial = EXPSET_SZ * 2; const size_t Compiler::s_optCSEhashGrowthFactor = 2; const size_t Compiler::s_optCSEhashBucketSize = 4; /***************************************************************************** * * We've found all the candidates, build the index for easy access. */ void Compiler::optCSEstop() { if (optCSECandidateCount == 0) { return; } CSEdsc* dsc; CSEdsc** ptr; size_t cnt; optCSEtab = new (this, CMK_CSE) CSEdsc*[optCSECandidateCount](); for (cnt = optCSEhashSize, ptr = optCSEhash; cnt; cnt--, ptr++) { for (dsc = *ptr; dsc; dsc = dsc->csdNextInBucket) { if (dsc->csdIndex) { noway_assert((unsigned)dsc->csdIndex <= optCSECandidateCount); if (optCSEtab[dsc->csdIndex - 1] == nullptr) { optCSEtab[dsc->csdIndex - 1] = dsc; } } } } #ifdef DEBUG for (cnt = 0; cnt < optCSECandidateCount; cnt++) { noway_assert(optCSEtab[cnt] != nullptr); } #endif } /***************************************************************************** * * Return the descriptor for the CSE with the given index. */ inline Compiler::CSEdsc* Compiler::optCSEfindDsc(unsigned index) { noway_assert(index); noway_assert(index <= optCSECandidateCount); noway_assert(optCSEtab[index - 1]); return optCSEtab[index - 1]; } //------------------------------------------------------------------------ // Compiler::optUnmarkCSE // // Arguments: // tree - A sub tree that originally was part of a CSE use // that we are currently in the process of removing. // // Return Value: // Returns true if we can safely remove the 'tree' node. // Returns false if the node is a CSE def that the caller // needs to extract and preserve. // // Notes: // If 'tree' is a CSE use then we perform an unmark CSE operation // so that the CSE used counts and weight are updated properly. // The only caller for this method is optUnmarkCSEs which is a // tree walker visitor function. When we return false this method // returns WALK_SKIP_SUBTREES so that we don't visit the remaining // nodes of the CSE def. // bool Compiler::optUnmarkCSE(GenTree* tree) { if (!IS_CSE_INDEX(tree->gtCSEnum)) { // If this node isn't a CSE use or def we can safely remove this node. // return true; } // make sure it's been initialized noway_assert(optCSEweight >= 0); // Is this a CSE use? if (IS_CSE_USE(tree->gtCSEnum)) { unsigned CSEnum = GET_CSE_INDEX(tree->gtCSEnum); CSEdsc* desc = optCSEfindDsc(CSEnum); #ifdef DEBUG if (verbose) { printf("Unmark CSE use #%02d at ", CSEnum); printTreeID(tree); printf(": %3d -> %3d\n", desc->csdUseCount, desc->csdUseCount - 1); } #endif // DEBUG // Perform an unmark CSE operation // 1. Reduce the nested CSE's 'use' count noway_assert(desc->csdUseCount > 0); if (desc->csdUseCount > 0) { desc->csdUseCount -= 1; if (desc->csdUseWtCnt < optCSEweight) { desc->csdUseWtCnt = 0; } else { desc->csdUseWtCnt -= optCSEweight; } } // 2. 
Unmark the CSE information in the node tree->gtCSEnum = NO_CSE; return true; } else { // It is not safe to remove this node, so we will return false // and the caller must add this node to the side effect list // return false; } } Compiler::fgWalkResult Compiler::optCSE_MaskHelper(GenTree** pTree, fgWalkData* walkData) { GenTree* tree = *pTree; Compiler* comp = walkData->compiler; optCSE_MaskData* pUserData = (optCSE_MaskData*)(walkData->pCallbackData); if (IS_CSE_INDEX(tree->gtCSEnum)) { unsigned cseIndex = GET_CSE_INDEX(tree->gtCSEnum); // Note that we DO NOT use getCSEAvailBit() here, for the CSE_defMask/CSE_useMask unsigned cseBit = genCSEnum2bit(cseIndex); if (IS_CSE_DEF(tree->gtCSEnum)) { BitVecOps::AddElemD(comp->cseMaskTraits, pUserData->CSE_defMask, cseBit); } else { BitVecOps::AddElemD(comp->cseMaskTraits, pUserData->CSE_useMask, cseBit); } } return WALK_CONTINUE; } // This function walks all the nodes of a given tree // and returns the mask of CSE defs and uses for the tree // void Compiler::optCSE_GetMaskData(GenTree* tree, optCSE_MaskData* pMaskData) { pMaskData->CSE_defMask = BitVecOps::MakeEmpty(cseMaskTraits); pMaskData->CSE_useMask = BitVecOps::MakeEmpty(cseMaskTraits); fgWalkTreePre(&tree, optCSE_MaskHelper, (void*)pMaskData); } //------------------------------------------------------------------------ // optCSE_canSwap: Determine if the execution order of two nodes can be swapped. // // Arguments: // op1 - The first node // op2 - The second node // // Return Value: // Return true iff it is safe to swap the execution order of 'op1' and 'op2', // considering only the locations of the CSE defs and uses. // // Assumptions: // 'op1' currently occurs before 'op2' in the execution order. // bool Compiler::optCSE_canSwap(GenTree* op1, GenTree* op2) { // op1 and op2 must be non-null. assert(op1 != nullptr); assert(op2 != nullptr); bool canSwap = true; // the default result unless proven otherwise. // If we haven't set up cseMaskTraits, do it now if (cseMaskTraits == nullptr) { cseMaskTraits = new (getAllocator(CMK_CSE)) BitVecTraits(optCSECandidateCount, this); } optCSE_MaskData op1MaskData; optCSE_MaskData op2MaskData; optCSE_GetMaskData(op1, &op1MaskData); optCSE_GetMaskData(op2, &op2MaskData); // We cannot swap if op1 contains a CSE def that is used by op2 if (!BitVecOps::IsEmptyIntersection(cseMaskTraits, op1MaskData.CSE_defMask, op2MaskData.CSE_useMask)) { canSwap = false; } else { // We also cannot swap if op2 contains a CSE def that is used by op1.
if (!BitVecOps::IsEmptyIntersection(cseMaskTraits, op2MaskData.CSE_defMask, op1MaskData.CSE_useMask)) { canSwap = false; } } return canSwap; } /***************************************************************************** * * Compare function passed to jitstd::sort() by CSE_Heuristic::SortCandidates * when (CodeOptKind() != Compiler::SMALL_CODE) */ /* static */ bool Compiler::optCSEcostCmpEx::operator()(const CSEdsc* dsc1, const CSEdsc* dsc2) { GenTree* exp1 = dsc1->csdTree; GenTree* exp2 = dsc2->csdTree; auto expCost1 = exp1->GetCostEx(); auto expCost2 = exp2->GetCostEx(); if (expCost2 != expCost1) { return expCost2 < expCost1; } // Sort the higher Use Counts toward the top if (dsc2->csdUseWtCnt != dsc1->csdUseWtCnt) { return dsc2->csdUseWtCnt < dsc1->csdUseWtCnt; } // With the same use count, Sort the lower Def Counts toward the top if (dsc1->csdDefWtCnt != dsc2->csdDefWtCnt) { return dsc1->csdDefWtCnt < dsc2->csdDefWtCnt; } // In order to ensure that we have a stable sort, we break ties using the csdIndex return dsc1->csdIndex < dsc2->csdIndex; } /***************************************************************************** * * Compare function passed to jitstd::sort() by CSE_Heuristic::SortCandidates * when (CodeOptKind() == Compiler::SMALL_CODE) */ /* static */ bool Compiler::optCSEcostCmpSz::operator()(const CSEdsc* dsc1, const CSEdsc* dsc2) { GenTree* exp1 = dsc1->csdTree; GenTree* exp2 = dsc2->csdTree; auto expCost1 = exp1->GetCostSz(); auto expCost2 = exp2->GetCostSz(); if (expCost2 != expCost1) { return expCost2 < expCost1; } // Sort the higher Use Counts toward the top if (dsc2->csdUseCount != dsc1->csdUseCount) { return dsc2->csdUseCount < dsc1->csdUseCount; } // With the same use count, Sort the lower Def Counts toward the top if (dsc1->csdDefCount != dsc2->csdDefCount) { return dsc1->csdDefCount < dsc2->csdDefCount; } // In order to ensure that we have a stable sort, we break ties using the csdIndex return dsc1->csdIndex < dsc2->csdIndex; } /***************************************************************************** * * Initialize the Value Number CSE tracking logic. */ void Compiler::optValnumCSE_Init() { #ifdef DEBUG optCSEtab = nullptr; #endif // This gets set in optValnumCSE_InitDataFlow cseLivenessTraits = nullptr; // Initialize when used by optCSE_canSwap() cseMaskTraits = nullptr; // Allocate and clear the hash bucket table optCSEhash = new (this, CMK_CSE) CSEdsc*[s_optCSEhashSizeInitial](); optCSEhashSize = s_optCSEhashSizeInitial; optCSEhashMaxCountBeforeResize = optCSEhashSize * s_optCSEhashBucketSize; optCSEhashCount = 0; optCSECandidateCount = 0; optDoCSE = false; // Stays false until we find duplicate CSE tree // optCseCheckedBoundMap is unused in most functions, allocated only when used optCseCheckedBoundMap = nullptr; } unsigned optCSEKeyToHashIndex(size_t key, size_t optCSEhashSize) { unsigned hash; hash = (unsigned)key; #ifdef TARGET_64BIT hash ^= (unsigned)(key >> 32); #endif hash *= (unsigned)(optCSEhashSize + 1); hash >>= 7; return hash % optCSEhashSize; } //--------------------------------------------------------------------------- // optValnumCSE_Index: // - Returns the CSE index to use for this tree, // or zero if this expression is not currently a CSE. // // Arguments: // tree - The current candidate CSE expression // stmt - The current statement that contains tree // // // Notes: We build a hash table that contains all of the expressions that // are presented to this method. Whenever we see a duplicate expression // we have a CSE candidate. 
If it is the first time seeing the duplicate // we allocate a new CSE index. If we have already allocated a CSE index // we return that index. There currently is a limit on the number of CSEs // that we can have of MAX_CSE_CNT (64) // unsigned Compiler::optValnumCSE_Index(GenTree* tree, Statement* stmt) { size_t key; unsigned hval; CSEdsc* hashDsc; bool enableSharedConstCSE = false; bool isSharedConst = false; int configValue = JitConfig.JitConstCSE(); #if defined(TARGET_ARM64) // ARM64 - allow to combine with nearby offsets, when config is not 2 or 4 if ((configValue != CONST_CSE_ENABLE_ARM64_NO_SHARING) && (configValue != CONST_CSE_ENABLE_ALL_NO_SHARING)) { enableSharedConstCSE = true; } #endif // TARGET_ARM64 // All Platforms - also allow to combine with nearby offsets, when config is 3 if (configValue == CONST_CSE_ENABLE_ALL) { enableSharedConstCSE = true; } // We use the liberal Value numbers when building the set of CSE ValueNum vnLib = tree->GetVN(VNK_Liberal); ValueNum vnLibNorm = vnStore->VNNormalValue(vnLib); // We use the normal value number because we want the CSE candidate to // represent all expressions that produce the same normal value number. // We will handle the case where we have different exception sets when // promoting the candidates. // // We do this because a GT_IND will usually have a NullPtrExc entry in its // exc set, but we may have cleared the GTF_EXCEPT flag and if so, it won't // have an NullPtrExc, or we may have assigned the value of an GT_IND // into a LCL_VAR and then read it back later. // // When we are promoting the CSE candidates we ensure that any CSE // uses that we promote have an exc set that is the same as the CSE defs // or have an empty set. And that all of the CSE defs produced the required // set of exceptions for the CSE uses. // // We assign either vnLib or vnLibNorm as the hash key // // The only exception to using the normal value is for the GT_COMMA nodes. // Here we check to see if we have a GT_COMMA with a different value number // than the one from its op2. For this case we want to create two different // CSE candidates. This allows us to CSE the GT_COMMA separately from its value. // if (tree->OperGet() == GT_COMMA) { // op2 is the value produced by a GT_COMMA GenTree* op2 = tree->AsOp()->gtOp2; ValueNum vnOp2Lib = op2->GetVN(VNK_Liberal); // If the value number for op2 and tree are different, then some new // exceptions were produced by op1. For that case we will NOT use the // normal value. This allows us to CSE commas with an op1 that is // an BOUNDS_CHECK. 
// if (vnOp2Lib != vnLib) { key = vnLib; // include the exc set in the hash key } else { key = vnLibNorm; } // If we didn't do the above we would have op1 as the CSE def // and the parent comma as the CSE use (but with a different exc set) // This would prevent us from making any CSE with the comma // assert(vnLibNorm == vnStore->VNNormalValue(vnOp2Lib)); } else if (enableSharedConstCSE && tree->IsIntegralConst()) { assert(vnStore->IsVNConstant(vnLibNorm)); // We don't share small offset constants when they require a reloc // if (!tree->AsIntConCommon()->ImmedValNeedsReloc(this)) { // Here we make constants that have the same upper bits use the same key // // We create a key that encodes just the upper bits of the constant by // shifting out some of the low bits, (12 or 16 bits) // // This is the only case where the hash key is not a ValueNumber // size_t constVal = vnStore->CoercedConstantValue<size_t>(vnLibNorm); key = Encode_Shared_Const_CSE_Value(constVal); isSharedConst = true; } else { // Use the vnLibNorm value as the key key = vnLibNorm; } } else // Not a GT_COMMA or a GT_CNS_INT { key = vnLibNorm; } // Make sure that the result of Is_Shared_Const_CSE(key) matches isSharedConst. // Note that when isSharedConst is true then we require that the TARGET_SIGN_BIT is set in the key // and otherwise we require that we never create a ValueNumber with the TARGET_SIGN_BIT set. // assert(isSharedConst == Is_Shared_Const_CSE(key)); // Compute the hash value for the expression hval = optCSEKeyToHashIndex(key, optCSEhashSize); /* Look for a matching index in the hash table */ bool newCSE = false; for (hashDsc = optCSEhash[hval]; hashDsc; hashDsc = hashDsc->csdNextInBucket) { if (hashDsc->csdHashKey == key) { // Check for mismatched types on GT_CNS_INT nodes if ((tree->OperGet() == GT_CNS_INT) && (tree->TypeGet() != hashDsc->csdTree->TypeGet())) { continue; } treeStmtLst* newElem; /* Have we started the list of matching nodes? */ if (hashDsc->csdTreeList == nullptr) { // Create the new element based upon the matching hashDsc element. 
newElem = new (this, CMK_TreeStatementList) treeStmtLst; newElem->tslTree = hashDsc->csdTree; newElem->tslStmt = hashDsc->csdStmt; newElem->tslBlock = hashDsc->csdBlock; newElem->tslNext = nullptr; /* Start the list with the first CSE candidate recorded */ hashDsc->csdTreeList = newElem; hashDsc->csdTreeLast = newElem; hashDsc->csdStructHnd = NO_CLASS_HANDLE; hashDsc->csdIsSharedConst = isSharedConst; hashDsc->csdStructHndMismatch = false; if (varTypeIsStruct(tree->gtType)) { // When we have a GT_IND node with a SIMD type then we don't have a reliable // struct handle and gtGetStructHandleIfPresent returns a guess that can be wrong // if ((hashDsc->csdTree->OperGet() != GT_IND) || !varTypeIsSIMD(tree)) { hashDsc->csdStructHnd = gtGetStructHandleIfPresent(hashDsc->csdTree); } } } noway_assert(hashDsc->csdTreeList); /* Append this expression to the end of the list */ newElem = new (this, CMK_TreeStatementList) treeStmtLst; newElem->tslTree = tree; newElem->tslStmt = stmt; newElem->tslBlock = compCurBB; newElem->tslNext = nullptr; hashDsc->csdTreeLast->tslNext = newElem; hashDsc->csdTreeLast = newElem; if (varTypeIsStruct(newElem->tslTree->gtType)) { // When we have a GT_IND node with a SIMD type then we don't have a reliable // struct handle and gtGetStructHandleIfPresent returns a guess that can be wrong // if ((newElem->tslTree->OperGet() != GT_IND) || !varTypeIsSIMD(newElem->tslTree)) { CORINFO_CLASS_HANDLE newElemStructHnd = gtGetStructHandleIfPresent(newElem->tslTree); if (newElemStructHnd != NO_CLASS_HANDLE) { if (hashDsc->csdStructHnd == NO_CLASS_HANDLE) { // The previous node(s) were GT_IND's and didn't carry the struct handle info // The current node does have the struct handle info, so record it now // hashDsc->csdStructHnd = newElemStructHnd; } else if (newElemStructHnd != hashDsc->csdStructHnd) { hashDsc->csdStructHndMismatch = true; #ifdef DEBUG if (verbose) { printf("Abandoned - CSE candidate has mismatching struct handles!\n"); printTreeID(newElem->tslTree); } #endif // DEBUG } } } } optDoCSE = true; // Found a duplicate CSE tree /* Have we assigned a CSE index? 
*/ if (hashDsc->csdIndex == 0) { newCSE = true; break; } assert(FitsIn<signed char>(hashDsc->csdIndex)); tree->gtCSEnum = ((signed char)hashDsc->csdIndex); return hashDsc->csdIndex; } } if (!newCSE) { /* Not found, create a new entry (unless we have too many already) */ if (optCSECandidateCount < MAX_CSE_CNT) { if (optCSEhashCount == optCSEhashMaxCountBeforeResize) { size_t newOptCSEhashSize = optCSEhashSize * s_optCSEhashGrowthFactor; CSEdsc** newOptCSEhash = new (this, CMK_CSE) CSEdsc*[newOptCSEhashSize](); // Iterate through each existing entry, moving to the new table CSEdsc** ptr; CSEdsc* dsc; size_t cnt; for (cnt = optCSEhashSize, ptr = optCSEhash; cnt; cnt--, ptr++) { for (dsc = *ptr; dsc;) { CSEdsc* nextDsc = dsc->csdNextInBucket; size_t newHval = optCSEKeyToHashIndex(dsc->csdHashKey, newOptCSEhashSize); // Move CSEdsc to bucket in enlarged table dsc->csdNextInBucket = newOptCSEhash[newHval]; newOptCSEhash[newHval] = dsc; dsc = nextDsc; } } hval = optCSEKeyToHashIndex(key, newOptCSEhashSize); optCSEhash = newOptCSEhash; optCSEhashSize = newOptCSEhashSize; optCSEhashMaxCountBeforeResize = optCSEhashMaxCountBeforeResize * s_optCSEhashGrowthFactor; } ++optCSEhashCount; hashDsc = new (this, CMK_CSE) CSEdsc; hashDsc->csdHashKey = key; hashDsc->csdConstDefValue = 0; hashDsc->csdConstDefVN = vnStore->VNForNull(); // uninit value hashDsc->csdIndex = 0; hashDsc->csdIsSharedConst = false; hashDsc->csdLiveAcrossCall = false; hashDsc->csdDefCount = 0; hashDsc->csdUseCount = 0; hashDsc->csdDefWtCnt = 0; hashDsc->csdUseWtCnt = 0; hashDsc->defExcSetPromise = vnStore->VNForEmptyExcSet(); hashDsc->defExcSetCurrent = vnStore->VNForNull(); // uninit value hashDsc->defConservNormVN = vnStore->VNForNull(); // uninit value hashDsc->csdTree = tree; hashDsc->csdStmt = stmt; hashDsc->csdBlock = compCurBB; hashDsc->csdTreeList = nullptr; /* Append the entry to the hash bucket */ hashDsc->csdNextInBucket = optCSEhash[hval]; optCSEhash[hval] = hashDsc; } return 0; } else // newCSE is true { /* We get here only after finding a matching CSE */ /* Create a new CSE (unless we have the maximum already) */ if (optCSECandidateCount == MAX_CSE_CNT) { #ifdef DEBUG if (verbose) { printf("Exceeded the MAX_CSE_CNT, not using tree:\n"); gtDispTree(tree); } #endif // DEBUG return 0; } C_ASSERT((signed char)MAX_CSE_CNT == MAX_CSE_CNT); unsigned CSEindex = ++optCSECandidateCount; /* Record the new CSE index in the hashDsc */ hashDsc->csdIndex = CSEindex; /* Update the gtCSEnum field in the original tree */ noway_assert(hashDsc->csdTreeList->tslTree->gtCSEnum == 0); assert(FitsIn<signed char>(CSEindex)); hashDsc->csdTreeList->tslTree->gtCSEnum = ((signed char)CSEindex); noway_assert(((unsigned)hashDsc->csdTreeList->tslTree->gtCSEnum) == CSEindex); tree->gtCSEnum = ((signed char)CSEindex); #ifdef DEBUG if (verbose) { printf("\nCSE candidate #%02u, key=", CSEindex); if (!Compiler::Is_Shared_Const_CSE(key)) { vnPrint((unsigned)key, 0); } else { size_t kVal = Compiler::Decode_Shared_Const_CSE_Value(key); printf("K_%p", dspPtr(kVal)); } printf(" in " FMT_BB ", [cost=%2u, size=%2u]: \n", compCurBB->bbNum, tree->GetCostEx(), tree->GetCostSz()); gtDispTree(tree); } #endif // DEBUG return CSEindex; } } //------------------------------------------------------------------------ // optValnumCSE_Locate: Locate CSE candidates and assign them indices. 
// // Returns: // true if there are any CSE candidates, false otherwise // bool Compiler::optValnumCSE_Locate() { bool enableConstCSE = true; int configValue = JitConfig.JitConstCSE(); // all platforms - disable CSE of constant values when config is 1 if (configValue == CONST_CSE_DISABLE_ALL) { enableConstCSE = false; } #if !defined(TARGET_ARM64) // non-ARM64 platforms - disable by default // enableConstCSE = false; // Check for the two enable cases for all platforms // if ((configValue == CONST_CSE_ENABLE_ALL) || (configValue == CONST_CSE_ENABLE_ALL_NO_SHARING)) { enableConstCSE = true; } #endif for (BasicBlock* const block : Blocks()) { /* Make the block publicly available */ compCurBB = block; /* Ensure that the BBF_VISITED and BBF_MARKED flag are clear */ /* Everyone who uses these flags are required to clear afterwards */ noway_assert((block->bbFlags & (BBF_VISITED | BBF_MARKED)) == 0); /* Walk the statement trees in this basic block */ for (Statement* const stmt : block->NonPhiStatements()) { const bool isReturn = stmt->GetRootNode()->OperIs(GT_RETURN); /* We walk the tree in the forwards direction (bottom up) */ bool stmtHasArrLenCandidate = false; for (GenTree* const tree : stmt->TreeList()) { if (tree->OperIsCompare() && stmtHasArrLenCandidate) { // Check if this compare is a function of (one of) the checked // bound candidate(s); we may want to update its value number. // if the array length gets CSEd optCseUpdateCheckedBoundMap(tree); } // Don't allow CSE of constants if it is disabled // if (tree->IsIntegralConst()) { if (!enableConstCSE) { continue; } } // Don't allow non-SIMD struct CSEs under a return; we don't fully // re-morph these if we introduce a CSE assignment, and so may create // IR that lower is not yet prepared to handle. // if (isReturn && varTypeIsStruct(tree->gtType) && !varTypeIsSIMD(tree->gtType)) { continue; } if (!optIsCSEcandidate(tree)) { continue; } ValueNum valueVN = vnStore->VNNormalValue(tree->GetVN(VNK_Liberal)); if (ValueNumStore::isReservedVN(valueVN) && (valueVN != ValueNumStore::VNForNull())) { continue; } // We want to CSE simple constant leaf nodes, but we don't want to // CSE non-leaf trees that compute CSE constant values. // Instead we let the Value Number based Assertion Prop phase handle them. // // Here, unlike the rest of optCSE, we use the conservative value number // rather than the liberal one, since the conservative one // is what the Value Number based Assertion Prop will use // and the point is to avoid optimizing cases that it will // handle. // if (!tree->OperIsLeaf() && vnStore->IsVNConstant(vnStore->VNConservativeNormalValue(tree->gtVNPair))) { continue; } /* Assign an index to this expression */ unsigned CSEindex = optValnumCSE_Index(tree, stmt); if (CSEindex != 0) { noway_assert(((unsigned)tree->gtCSEnum) == CSEindex); } if (IS_CSE_INDEX(CSEindex) && (tree->OperGet() == GT_ARR_LENGTH)) { stmtHasArrLenCandidate = true; } } } } /* We're done if there were no interesting expressions */ if (!optDoCSE) { return false; } /* We're finished building the expression lookup table */ optCSEstop(); return true; } //------------------------------------------------------------------------ // optCseUpdateCheckedBoundMap: Check if this compare is a tractable function of // a checked bound that is a CSE candidate, and insert // an entry in the optCseCheckedBoundMap if so. This facilitates // subsequently updating the compare's value number if // the bound gets CSEd. 
// // Arguments: // compare - The compare node to check // void Compiler::optCseUpdateCheckedBoundMap(GenTree* compare) { assert(compare->OperIsCompare()); ValueNum compareVN = compare->gtVNPair.GetConservative(); VNFuncApp cmpVNFuncApp; if (!vnStore->GetVNFunc(compareVN, &cmpVNFuncApp) || (cmpVNFuncApp.m_func != GetVNFuncForNode(compare))) { // Value numbering inferred this compare as something other // than its own operator; leave its value number alone. return; } // Now look for a checked bound feeding the compare ValueNumStore::CompareCheckedBoundArithInfo info; GenTree* boundParent = nullptr; if (vnStore->IsVNCompareCheckedBound(compareVN)) { // Simple compare of an bound against something else. vnStore->GetCompareCheckedBound(compareVN, &info); boundParent = compare; } else if (vnStore->IsVNCompareCheckedBoundArith(compareVN)) { // Compare of a bound +/- some offset to something else. GenTree* op1 = compare->gtGetOp1(); GenTree* op2 = compare->gtGetOp2(); vnStore->GetCompareCheckedBoundArithInfo(compareVN, &info); if (GetVNFuncForNode(op1) == (VNFunc)info.arrOper) { // The arithmetic node is the bound's parent. boundParent = op1; } else if (GetVNFuncForNode(op2) == (VNFunc)info.arrOper) { // The arithmetic node is the bound's parent. boundParent = op2; } } if (boundParent != nullptr) { GenTree* bound = nullptr; // Find which child of boundParent is the bound. Abort if neither // conservative value number matches the one from the compare VN. GenTree* child1 = boundParent->gtGetOp1(); if ((info.vnBound == child1->gtVNPair.GetConservative()) && IS_CSE_INDEX(child1->gtCSEnum)) { bound = child1; } else { GenTree* child2 = boundParent->gtGetOp2(); if ((info.vnBound == child2->gtVNPair.GetConservative()) && IS_CSE_INDEX(child2->gtCSEnum)) { bound = child2; } } if (bound != nullptr) { // Found a checked bound feeding a compare that is a tractable function of it; // record this in the map so we can update the compare VN if the bound // node gets CSEd. if (optCseCheckedBoundMap == nullptr) { // Allocate map on first use. optCseCheckedBoundMap = new (getAllocator(CMK_CSE)) NodeToNodeMap(getAllocator()); } optCseCheckedBoundMap->Set(bound, compare); } } } /***************************************************************************** * * Compute each blocks bbCseGen * This is the bitset that represents the CSEs that are generated within the block * Also initialize bbCseIn, bbCseOut and bbCseGen sets for all blocks */ void Compiler::optValnumCSE_InitDataFlow() { // BitVec trait information for computing CSE availability using the CSE_DataFlow algorithm. // Two bits are allocated per CSE candidate to compute CSE availability // plus an extra bit to handle the initial unvisited case. // (See CSE_DataFlow::EndMerge for an explaination of why this is necessary) // // The two bits per CSE candidate have the following meanings: // 11 - The CSE is available, and is also available when considering calls as killing availability. // 10 - The CSE is available, but is not available when considering calls as killing availability. // 00 - The CSE is not available // 01 - An illegal combination // const unsigned bitCount = (optCSECandidateCount * 2) + 1; // Init traits and cseCallKillsMask bitvectors. 
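// Illustrative sketch of how the kill mask is used (the pairs below use the
// same two-digit notation as the table above; bit positions are schematic):
//
//   available_cses    ... 11 10 00 ...   (candidates #1, #2, #3)
//   cseCallKillsMask  ... 10 10 10 ...   (keeps each avail bit, clears each
//                                          cross-call bit)
//   after a call:
//   AND of the two    ... 10 10 00 ...   #1 remains available but no longer
//                                          counts as live across a call,
//                                          #3 stays unavailable.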
cseLivenessTraits = new (getAllocator(CMK_CSE)) BitVecTraits(bitCount, this); cseCallKillsMask = BitVecOps::MakeEmpty(cseLivenessTraits); for (unsigned inx = 1; inx <= optCSECandidateCount; inx++) { unsigned cseAvailBit = getCSEAvailBit(inx); // a one preserves availability and a zero kills the availability // we generate this kind of bit pattern: 101010101010 // BitVecOps::AddElemD(cseLivenessTraits, cseCallKillsMask, cseAvailBit); } for (BasicBlock* const block : Blocks()) { /* Initialize the blocks's bbCseIn set */ bool init_to_zero = false; if (block == fgFirstBB) { /* Clear bbCseIn for the entry block */ init_to_zero = true; } #if !CSE_INTO_HANDLERS else { if (bbIsHandlerBeg(block)) { /* Clear everything on entry to filters or handlers */ init_to_zero = true; } } #endif if (init_to_zero) { /* Initialize to {ZERO} prior to dataflow */ block->bbCseIn = BitVecOps::MakeEmpty(cseLivenessTraits); } else { /* Initialize to {ALL} prior to dataflow */ block->bbCseIn = BitVecOps::MakeFull(cseLivenessTraits); } block->bbCseOut = BitVecOps::MakeFull(cseLivenessTraits); /* Initialize to {ZERO} prior to locating the CSE candidates */ block->bbCseGen = BitVecOps::MakeEmpty(cseLivenessTraits); } // We walk the set of CSE candidates and set the bit corresponding to the CSEindex // in the block's bbCseGen bitset // for (unsigned inx = 0; inx < optCSECandidateCount; inx++) { CSEdsc* dsc = optCSEtab[inx]; unsigned CSEindex = dsc->csdIndex; treeStmtLst* lst = dsc->csdTreeList; noway_assert(lst); while (lst != nullptr) { BasicBlock* block = lst->tslBlock; unsigned cseAvailBit = getCSEAvailBit(CSEindex); unsigned cseAvailCrossCallBit = getCSEAvailCrossCallBit(CSEindex); // This CSE is generated in 'block', we always set the cseAvailBit // If this block does not contain a call, we also set cseAvailCrossCallBit // // If we have a call in this block then in the loop below we walk the trees // backwards to find any CSEs that are generated after the last call in the block. // BitVecOps::AddElemD(cseLivenessTraits, block->bbCseGen, cseAvailBit); if ((block->bbFlags & BBF_HAS_CALL) == 0) { BitVecOps::AddElemD(cseLivenessTraits, block->bbCseGen, cseAvailCrossCallBit); } lst = lst->tslNext; } } for (BasicBlock* const block : Blocks()) { // If the block doesn't contains a call then skip it... // if ((block->bbFlags & BBF_HAS_CALL) == 0) { continue; } // We only need to examine blocks that generate CSEs // if (BitVecOps::IsEmpty(cseLivenessTraits, block->bbCseGen)) { continue; } // If the block contains a call and generates CSEs, we may need to update // the bbCseGen set as we may generate some CSEs after the last call in the block. 
// // We walk the statements in this basic block starting at the end and walking backwards, // until we reach the first call // Statement* stmt = block->lastStmt(); bool foundCall = false; while (!foundCall) { // Also walk the tree in the backwards direction (bottom up) // looking for CSE's and updating block->bbCseGen // When we reach a call node, we can exit the for loop // for (GenTree* tree = stmt->GetRootNode(); tree != nullptr; tree = tree->gtPrev) { if (IS_CSE_INDEX(tree->gtCSEnum)) { unsigned CSEnum = GET_CSE_INDEX(tree->gtCSEnum); unsigned cseAvailCrossCallBit = getCSEAvailCrossCallBit(CSEnum); BitVecOps::AddElemD(cseLivenessTraits, block->bbCseGen, cseAvailCrossCallBit); } if (tree->OperGet() == GT_CALL) { // Any cse's that we haven't placed in the block->bbCseGen set // aren't currently alive (using cseAvailCrossCallBit) // foundCall = true; break; } } // The JIT can sometimes remove the only call in the block if (stmt == block->firstStmt()) { break; } stmt = stmt->GetPrevStmt(); } } #ifdef DEBUG // Dump out the bbCseGen information that we just created // if (verbose) { bool headerPrinted = false; for (BasicBlock* const block : Blocks()) { if (!BitVecOps::IsEmpty(cseLivenessTraits, block->bbCseGen)) { if (!headerPrinted) { printf("\nBlocks that generate CSE def/uses\n"); headerPrinted = true; } printf(FMT_BB " cseGen = ", block->bbNum); optPrintCSEDataFlowSet(block->bbCseGen); printf("\n"); } } } fgDebugCheckLinks(); #endif // DEBUG } /***************************************************************************** * * CSE Dataflow, so that all helper methods for dataflow are in a single place * */ class CSE_DataFlow { Compiler* m_comp; EXPSET_TP m_preMergeOut; public: CSE_DataFlow(Compiler* pCompiler) : m_comp(pCompiler), m_preMergeOut(BitVecOps::UninitVal()) { } // At the start of the merge function of the dataflow equations, initialize premerge state (to detect changes.) void StartMerge(BasicBlock* block) { // Record the initial value of block->bbCseOut in m_preMergeOut. // It is used in EndMerge() to control the termination of the DataFlow algorithm. // Note that the first time we visit a block, the value of bbCseOut is MakeFull() // BitVecOps::Assign(m_comp->cseLivenessTraits, m_preMergeOut, block->bbCseOut); #if 0 #ifdef DEBUG if (m_comp->verbose) { printf("StartMerge " FMT_BB "\n", block->bbNum); printf(" :: cseOut = %s\n", genES2str(m_comp->cseLivenessTraits, block->bbCseOut)); } #endif // DEBUG #endif // 0 } // Merge: perform the merging of each of the predecessor's liveness values (since this is a forward analysis) void Merge(BasicBlock* block, BasicBlock* predBlock, unsigned dupCount) { #if 0 #ifdef DEBUG if (m_comp->verbose) { printf("Merge " FMT_BB " and " FMT_BB "\n", block->bbNum, predBlock->bbNum); printf(" :: cseIn = %s\n", genES2str(m_comp->cseLivenessTraits, block->bbCseIn)); printf(" :: cseOut = %s\n", genES2str(m_comp->cseLivenessTraits, block->bbCseOut)); } #endif // DEBUG #endif // 0 BitVecOps::IntersectionD(m_comp->cseLivenessTraits, block->bbCseIn, predBlock->bbCseOut); #if 0 #ifdef DEBUG if (m_comp->verbose) { printf(" => cseIn = %s\n", genES2str(m_comp->cseLivenessTraits, block->bbCseIn)); } #endif // DEBUG #endif // 0 } //------------------------------------------------------------------------ // MergeHandler: Merge CSE values into the first exception handler/filter block. 
// // Arguments: // block - the block that is the start of a handler or filter; // firstTryBlock - the first block of the try for "block" handler; // lastTryBlock - the last block of the try for "block" handler;. // // Notes: // We can jump to the handler from any instruction in the try region. // It means we can propagate only CSE that are valid for the whole try region. void MergeHandler(BasicBlock* block, BasicBlock* firstTryBlock, BasicBlock* lastTryBlock) { // TODO CQ: add CSE for handler blocks, CSE_INTO_HANDLERS should be defined. } // At the end of the merge store results of the dataflow equations, in a postmerge state. // We also handle the case where calls conditionally kill CSE availabilty. // bool EndMerge(BasicBlock* block) { // We can skip the calls kill step when our block doesn't have a callsite // or we don't have any available CSEs in our bbCseIn // if (((block->bbFlags & BBF_HAS_CALL) == 0) || BitVecOps::IsEmpty(m_comp->cseLivenessTraits, block->bbCseIn)) { // No callsite in 'block' or 'block->bbCseIn was empty, so we can use bbCseIn directly // BitVecOps::DataFlowD(m_comp->cseLivenessTraits, block->bbCseOut, block->bbCseGen, block->bbCseIn); } else { // We will create a temporary BitVec to pass to DataFlowD() // EXPSET_TP cseIn_withCallsKill = BitVecOps::UninitVal(); // cseIn_withCallsKill is set to (bbCseIn AND cseCallKillsMask) // BitVecOps::Assign(m_comp->cseLivenessTraits, cseIn_withCallsKill, block->bbCseIn); BitVecOps::IntersectionD(m_comp->cseLivenessTraits, cseIn_withCallsKill, m_comp->cseCallKillsMask); // Call DataFlowD with the modified BitVec: (bbCseIn AND cseCallKillsMask) // BitVecOps::DataFlowD(m_comp->cseLivenessTraits, block->bbCseOut, block->bbCseGen, cseIn_withCallsKill); } // The bool 'notDone' is our terminating condition. // If it is 'true' then the initial value of m_preMergeOut was different than the final value that // we computed for bbCseOut. When it is true we will visit every the successor of 'block' // // This is also why we need to allocate an extra bit in our cseLivenessTrair BitVecs. // We always need to visit our successor blocks once, thus we require that that the first time // that we visit a block we have a bit set in m_preMergeOut that won't be set when we compute // the new value of bbCseOut. // bool notDone = !BitVecOps::Equal(m_comp->cseLivenessTraits, block->bbCseOut, m_preMergeOut); #if 0 #ifdef DEBUG if (m_comp->verbose) { printf("EndMerge " FMT_BB "\n", block->bbNum); printf(" :: cseIn = %s\n", genES2str(m_comp->cseLivenessTraits, block->bbCseIn)); if (((block->bbFlags & BBF_HAS_CALL) != 0) && !BitVecOps::IsEmpty(m_comp->cseLivenessTraits, block->bbCseIn)) { printf(" -- cseKill = %s\n", genES2str(m_comp->cseLivenessTraits, m_comp->cseCallKillsMask)); } printf(" :: cseGen = %s\n", genES2str(m_comp->cseLivenessTraits, block->bbCseGen)); printf(" => cseOut = %s\n", genES2str(m_comp->cseLivenessTraits, block->bbCseOut)); printf(" != preMerge = %s, => %s\n", genES2str(m_comp->cseLivenessTraits, m_preMergeOut), notDone ? 
"true" : "false"); } #endif // DEBUG #endif // 0 return notDone; } }; /***************************************************************************** * * Perform a DataFlow forward analysis using the block CSE bitsets: * Inputs: * bbCseGen - Exact CSEs that are always generated within the block * bbCseIn - Maximal estimate of CSEs that are/could be available at input to the block * bbCseOut - Maximal estimate of CSEs that are/could be available at exit to the block * * Outputs: * bbCseIn - Computed CSEs that are available at input to the block * bbCseOut - Computed CSEs that are available at exit to the block */ void Compiler::optValnumCSE_DataFlow() { #ifdef DEBUG if (verbose) { printf("\nPerforming DataFlow for ValnumCSE's\n"); } #endif // DEBUG CSE_DataFlow cse(this); // Modified dataflow algorithm for available expressions. DataFlow cse_flow(this); cse_flow.ForwardAnalysis(cse); #ifdef DEBUG if (verbose) { printf("\nAfter performing DataFlow for ValnumCSE's\n"); for (BasicBlock* const block : Blocks()) { printf(FMT_BB " in gen out\n", block->bbNum); optPrintCSEDataFlowSet(block->bbCseIn); printf("\n"); optPrintCSEDataFlowSet(block->bbCseGen); printf("\n"); optPrintCSEDataFlowSet(block->bbCseOut); printf("\n"); } printf("\n"); } #endif // DEBUG } //--------------------------------------------------------------------------- // optValnumCSE_Availablity: // // Using the information computed by CSE_DataFlow determine for each // CSE whether the CSE is a definition (if the CSE was not available) // or if the CSE is a use (if the CSE was previously made available). // The implementation iterates over all blocks setting 'available_cses' // to the CSEs that are available at input to the block. // When a CSE expression is encountered it is classified as either // as a definition (if the CSE is not in the 'available_cses' set) or // as a use (if the CSE is in the 'available_cses' set). If the CSE // is a definition then it is added to the 'available_cses' set. // // This algorithm uncovers the defs and uses gradually and as it does // so it also builds the exception set that all defs make: 'defExcSetCurrent' // and the exception set that the uses we have seen depend upon: 'defExcSetPromise'. // // Typically expressions with the same normal ValueNum generate exactly the // same exception sets. There are two way that we can get different exception // sets with the same Normal value number. // // 1. We used an arithmetic identiity: // e.g. (p.a + q.b) * 0 :: The normal value for the expression is zero // and we have NullPtrExc(p) and NullPtrExc(q) // e.g. (p.a - p.a) :: The normal value for the expression is zero // and we have NullPtrExc(p) // 2. We stored an expression into a LclVar or into Memory and read it later // e.g. t = p.a; // e1 = (t + q.b) :: e1 has one NullPtrExc and e2 has two. // e2 = (p.a + q.b) but both compute the same normal value // e.g. m.a = p.a; // e1 = (m.a + q.b) :: e1 and e2 have different exception sets. 
// e2 = (p.a + q.b) but both compute the same normal value // void Compiler::optValnumCSE_Availablity() { #ifdef DEBUG if (verbose) { printf("Labeling the CSEs with Use/Def information\n"); } #endif EXPSET_TP available_cses = BitVecOps::MakeEmpty(cseLivenessTraits); for (BasicBlock* const block : Blocks()) { // Make the block publicly available compCurBB = block; // Retrieve the available CSE's at the start of this block BitVecOps::Assign(cseLivenessTraits, available_cses, block->bbCseIn); // Walk the statement trees in this basic block for (Statement* const stmt : block->NonPhiStatements()) { // We walk the tree in the forwards direction (bottom up) for (GenTree* const tree : stmt->TreeList()) { bool isUse = false; bool isDef = false; if (IS_CSE_INDEX(tree->gtCSEnum)) { unsigned CSEnum = GET_CSE_INDEX(tree->gtCSEnum); unsigned cseAvailBit = getCSEAvailBit(CSEnum); unsigned cseAvailCrossCallBit = getCSEAvailCrossCallBit(CSEnum); CSEdsc* desc = optCSEfindDsc(CSEnum); weight_t stmw = block->getBBWeight(this); isUse = BitVecOps::IsMember(cseLivenessTraits, available_cses, cseAvailBit); isDef = !isUse; // If is isn't a CSE use, it is a CSE def // Is this a "use", that we haven't yet marked as live across a call // and it is not available when we have calls that kill CSE's (cseAvailCrossCallBit) // if the above is true then we will mark this the CSE as live across a call // bool madeLiveAcrossCall = false; if (isUse && !desc->csdLiveAcrossCall && !BitVecOps::IsMember(cseLivenessTraits, available_cses, cseAvailCrossCallBit)) { desc->csdLiveAcrossCall = true; madeLiveAcrossCall = true; } #ifdef DEBUG // If this is a CSE def (i.e. the CSE is not available here, since it is being defined), then the // call-kill bit // should also be zero since it is also not available across a call. // if (isDef) { assert(!BitVecOps::IsMember(cseLivenessTraits, available_cses, cseAvailCrossCallBit)); } if (verbose) { printf(FMT_BB " ", block->bbNum); printTreeID(tree); printf(" %s of " FMT_CSE " [weight=%s]%s\n", isUse ? "Use" : "Def", CSEnum, refCntWtd2str(stmw), madeLiveAcrossCall ? " *** Now Live Across Call ***" : ""); } #endif // DEBUG // Have we decided to abandon work on this CSE? if (desc->defExcSetPromise == ValueNumStore::NoVN) { // This candidate had defs with differing liberal exc set VNs // We have abandoned CSE promotion for this candidate // Clear the CSE flag tree->gtCSEnum = NO_CSE; JITDUMP(" Abandoned - CSE candidate has defs with different exception sets!\n"); continue; } // Record the exception set for tree's liberal value number // ValueNum theLiberalExcSet = vnStore->VNExceptionSet(tree->gtVNPair.GetLiberal()); // Is this a CSE use or a def? if (isDef) { // This is a CSE def // Is defExcSetCurrent still set to the uninit marker value of VNForNull() ? if (desc->defExcSetCurrent == vnStore->VNForNull()) { // This is the first time visited, so record this defs exception set desc->defExcSetCurrent = theLiberalExcSet; } // Have we seen a CSE use and made a promise of an exception set? 
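// Illustrative sketch (the exception sets below are schematic):
//
//   a use visited earlier promised:   defExcSetPromise = {NullPtrExc(p)}
//   this def produces:                theLiberalExcSet = {NullPtrExc(p), DivByZeroExc}
//
// The promise is a subset of what this def produces, so the def still honors it
// and defExcSetCurrent is narrowed to the intersection of what every def seen
// so far can guarantee. A def whose exception set does not cover the promise
// abandons the whole candidate by setting defExcSetPromise to NoVN below.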
// if (desc->defExcSetPromise != vnStore->VNForEmptyExcSet()) { // The exeception set held in desc->defExcSetPromise must be a subset of theLiberalExcSet // if (vnStore->VNExcIsSubset(theLiberalExcSet, desc->defExcSetPromise)) { // This new def still satisfies any promise made to all the CSE uses that we have // encountered // // no update is needed when these are the same VN if (desc->defExcSetCurrent != theLiberalExcSet) { // We will change the value of desc->defExcSetCurrent to be the intersection of // these two sets. // This is the set of exceptions that all CSE defs have (that we have visited so // far) // ValueNum intersectionExcSet = vnStore->VNExcSetIntersection(desc->defExcSetCurrent, theLiberalExcSet); #ifdef DEBUG if (this->verbose) { VNFuncApp excSeq; vnStore->GetVNFunc(desc->defExcSetCurrent, &excSeq); printf(">>> defExcSetCurrent is "); vnStore->vnDumpExcSeq(this, &excSeq, true); printf("\n"); vnStore->GetVNFunc(theLiberalExcSet, &excSeq); printf(">>> theLiberalExcSet is "); vnStore->vnDumpExcSeq(this, &excSeq, true); printf("\n"); if (intersectionExcSet == vnStore->VNForEmptyExcSet()) { printf(">>> the intersectionExcSet is the EmptyExcSet\n"); } else { vnStore->GetVNFunc(intersectionExcSet, &excSeq); printf(">>> the intersectionExcSet is "); vnStore->vnDumpExcSeq(this, &excSeq, true); printf("\n"); } } #endif // DEBUG // Change the defExcSetCurrent to be a subset of its prior value // assert(vnStore->VNExcIsSubset(desc->defExcSetCurrent, intersectionExcSet)); desc->defExcSetCurrent = intersectionExcSet; } } else // This CSE def doesn't satisfy one of the exceptions already promised to a CSE use { // So, we will abandon all CSE promotions for this candidate // // We use the marker value of NoVN to indicate that we // should abandon this CSE candidate // desc->defExcSetPromise = ValueNumStore::NoVN; tree->gtCSEnum = NO_CSE; JITDUMP(" Abandon - CSE candidate has defs with exception sets that do not satisfy " "some CSE use\n"); continue; } } // For shared const CSE we don't set/use the defConservNormVN // if (!Is_Shared_Const_CSE(desc->csdHashKey)) { // Record or update the value of desc->defConservNormVN // ValueNum theConservNormVN = vnStore->VNConservativeNormalValue(tree->gtVNPair); // Is defConservNormVN still set to the uninit marker value of VNForNull() ? if (desc->defConservNormVN == vnStore->VNForNull()) { // This is the first def that we have visited, set defConservNormVN desc->defConservNormVN = theConservNormVN; } else { // Check to see if all defs have the same conservative normal VN if (theConservNormVN != desc->defConservNormVN) { // This candidate has defs with differing conservative normal VNs, mark it with NoVN desc->defConservNormVN = ValueNumStore::NoVN; // record the marker for differing VNs } } } // If we get here we have accepted this node as a valid CSE def desc->csdDefCount += 1; desc->csdDefWtCnt += stmw; // Mark the node as a CSE definition tree->gtCSEnum = TO_CSE_DEF(tree->gtCSEnum); // This CSE becomes available after this def BitVecOps::AddElemD(cseLivenessTraits, available_cses, cseAvailBit); BitVecOps::AddElemD(cseLivenessTraits, available_cses, cseAvailCrossCallBit); } else // We are visiting a CSE use { assert(isUse); // If the CSE use has no requirements for an exception set then we don't have to do anything // here // if (theLiberalExcSet != vnStore->VNForEmptyExcSet()) { // Are we visiting a use first, before visiting any defs of this CSE? // This is an atypical case that can occur with a bottom tested loop. 
// // Is defExcSetCurrent still set to the uninit marker value of VNForNull() ? if (desc->defExcSetCurrent == vnStore->VNForNull()) { // Update defExcSetPromise, this is our required exception set for all CSE defs // that we encounter later. // // We could see multiple uses before a def, so we require the Union of all exception // sets // desc->defExcSetPromise = vnStore->VNExcSetUnion(desc->defExcSetPromise, theLiberalExcSet); } else // we have already seen a def for this CSE and defExcSetCurrent is setup { if (vnStore->VNExcIsSubset(desc->defExcSetCurrent, theLiberalExcSet)) { // The current set of exceptions produced by all CSE defs have (that we have // visited so far) meets our requirement // // Add any exception items to the defExcSetPromise set // desc->defExcSetPromise = vnStore->VNExcSetUnion(desc->defExcSetPromise, theLiberalExcSet); } } // At this point defExcSetPromise contains all of the exception items that we can promise // here. // if (!vnStore->VNExcIsSubset(desc->defExcSetPromise, theLiberalExcSet)) { // We can't safely make this into a CSE use, because this // CSE use has an exception set item that is not promised // by all of our CSE defs. // // We will omit this CSE use from the graph and proceed, // the other uses and defs can still participate in the CSE optimization. // So this can't be a CSE use tree->gtCSEnum = NO_CSE; JITDUMP(" NO_CSE - This use has an exception set item that isn't contained in the " "defs!\n"); continue; } } // When we get here we have accepted this node as a valid CSE use desc->csdUseCount += 1; desc->csdUseWtCnt += stmw; } } // In order to determine if a CSE is live across a call, we model availablity using two bits and // kill all of the cseAvailCrossCallBit for each CSE whenever we see a GT_CALL (unless the call // generates a CSE). // if (tree->OperGet() == GT_CALL) { // Check for the common case of an already empty available_cses set // and thus nothing needs to be killed // if (!(BitVecOps::IsEmpty(cseLivenessTraits, available_cses))) { if (isUse) { // For a CSE Use we will assume that the CSE logic will replace it with a CSE LclVar and // not make the call so kill nothing } else { // partially kill any cse's that are currently alive (using the cseCallKillsMask set) // BitVecOps::IntersectionD(cseLivenessTraits, available_cses, cseCallKillsMask); if (isDef) { // We can have a GT_CALL that produces a CSE, // (i.e. 
HELPER.CORINFO_HELP_GETSHARED_*STATIC_BASE or // CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE) // // The CSE becomes available after the call, so set the cseAvailCrossCallBit bit in // available_cses // unsigned CSEnum = GET_CSE_INDEX(tree->gtCSEnum); unsigned cseAvailCrossCallBit = getCSEAvailCrossCallBit(CSEnum); BitVecOps::AddElemD(cseLivenessTraits, available_cses, cseAvailCrossCallBit); } } } } } } } } // The following class handles the CSE heuristics // we use a complex set of heuristic rules // to determine if it is likely to be profitable to perform this CSE // class CSE_Heuristic { Compiler* m_pCompiler; unsigned m_addCSEcount; weight_t aggressiveRefCnt; weight_t moderateRefCnt; unsigned enregCount; // count of the number of predicted enregistered variables bool largeFrame; bool hugeFrame; Compiler::codeOptimize codeOptKind; Compiler::CSEdsc** sortTab; size_t sortSiz; #ifdef DEBUG CLRRandom m_cseRNG; unsigned m_bias; #endif public: CSE_Heuristic(Compiler* pCompiler) : m_pCompiler(pCompiler) { codeOptKind = m_pCompiler->compCodeOpt(); } Compiler::codeOptimize CodeOptKind() { return codeOptKind; } // Perform the Initialization step for our CSE Heuristics // determine the various cut off values to use for // the aggressive, moderate and conservative CSE promotions // count the number of enregisterable variables // determine if the method has a large or huge stack frame. // void Initialize() { m_addCSEcount = 0; /* Count of the number of LclVars for CSEs that we added */ // Record the weighted ref count of the last "for sure" callee saved LclVar aggressiveRefCnt = 0; moderateRefCnt = 0; enregCount = 0; largeFrame = false; hugeFrame = false; sortTab = nullptr; sortSiz = 0; unsigned frameSize = 0; unsigned regAvailEstimate = ((CNT_CALLEE_ENREG * 3) + (CNT_CALLEE_TRASH * 2) + 1); unsigned lclNum; LclVarDsc* varDsc; for (lclNum = 0, varDsc = m_pCompiler->lvaTable; lclNum < m_pCompiler->lvaCount; lclNum++, varDsc++) { // Locals with no references don't use any local stack frame slots if (varDsc->lvRefCnt() == 0) { continue; } // Incoming stack arguments don't use any local stack frame slots if (varDsc->lvIsParam && !varDsc->lvIsRegArg) { continue; } #if FEATURE_FIXED_OUT_ARGS // Skip the OutgoingArgArea in computing frame size, since // its size is not yet known and it doesn't affect local // offsets from the frame pointer (though it may affect // them from the stack pointer). noway_assert(m_pCompiler->lvaOutgoingArgSpaceVar != BAD_VAR_NUM); if (lclNum == m_pCompiler->lvaOutgoingArgSpaceVar) { continue; } #endif // FEATURE_FIXED_OUT_ARGS bool onStack = (regAvailEstimate == 0); // true when it is likely that this LclVar will have a stack home // Some LclVars always have stack homes if ((varDsc->lvDoNotEnregister) || (varDsc->lvType == TYP_LCLBLK)) { onStack = true; } #ifdef TARGET_X86 // Treat floating point and 64 bit integers as always on the stack if (varTypeIsFloating(varDsc->TypeGet()) || varTypeIsLong(varDsc->TypeGet())) onStack = true; #endif if (onStack) { frameSize += m_pCompiler->lvaLclSize(lclNum); } else { // For the purposes of estimating the frameSize we // will consider this LclVar as being enregistered. // Now we reduce the remaining regAvailEstimate by // an appropriate amount. 
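// Illustrative arithmetic (the register counts below are hypothetical):
//
//   with CNT_CALLEE_ENREG == 10 and CNT_CALLEE_TRASH == 7 the estimate starts
//   at (10 * 3) + (7 * 2) + 1 = 45; a single-def/single-use LclVar is charged
//   1 and any other enregistered LclVar is charged 2, so somewhere between 23
//   and 45 locals can be assumed enregistered before the remainder are treated
//   as stack homed and start contributing to frameSize.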
// if (varDsc->lvRefCnt() <= 2) { // a single use single def LclVar only uses 1 regAvailEstimate -= 1; } else { // a LclVar with multiple uses and defs uses 2 if (regAvailEstimate >= 2) { regAvailEstimate -= 2; } else { // Don't try to subtract when regAvailEstimate is 1 regAvailEstimate = 0; } } } #ifdef TARGET_XARCH if (frameSize > 0x080) { // We likely have a large stack frame. // // On XARCH stack frame displacements can either use a 1-byte or a 4-byte displacement // with a large franme we will need to use some 4-byte displacements. // largeFrame = true; break; // early out, we don't need to keep increasing frameSize } #elif defined(TARGET_ARM) if (frameSize > 0x0400) { // We likely have a large stack frame. // // Thus we might need to use large displacements when loading or storing // to CSE LclVars that are not enregistered // On ARM32 this means using rsGetRsvdReg() to hold the large displacement largeFrame = true; } if (frameSize > 0x10000) { hugeFrame = true; break; // early out, we don't need to keep increasing frameSize } #elif defined(TARGET_ARM64) if (frameSize > 0x1000) { // We likely have a large stack frame. // // Thus we might need to use large displacements when loading or storing // to CSE LclVars that are not enregistered // On ARM64 this means using rsGetRsvdReg() to hold the large displacement // largeFrame = true; break; // early out, we don't need to keep increasing frameSize } #endif } // Iterate over the sorted list of tracked local variables // these are the register candidates for LSRA // We normally vist the LclVar in order of their weighted ref counts // and our hueristic assumes that the highest weighted ref count // LclVars will be enregistered and that the lowest weighted ref count // are likely be allocated in the stack frame. // The value of enregCount is incremented when we visit a LclVar // that can be enregistered. // for (unsigned trackedIndex = 0; trackedIndex < m_pCompiler->lvaTrackedCount; trackedIndex++) { LclVarDsc* varDsc = m_pCompiler->lvaGetDescByTrackedIndex(trackedIndex); var_types varTyp = varDsc->TypeGet(); // Locals with no references aren't enregistered if (varDsc->lvRefCnt() == 0) { continue; } // Some LclVars always have stack homes if ((varDsc->lvDoNotEnregister) || (varDsc->lvType == TYP_LCLBLK)) { continue; } // The enregCount only tracks the uses of integer registers // // We could track floating point register usage seperately // but it isn't worth the additional complexity as floating point CSEs // are rare and we typically have plenty of floating point register available. // if (!varTypeIsFloating(varTyp)) { enregCount++; // The primitive types, including TYP_SIMD types use one register #ifndef TARGET_64BIT if (varTyp == TYP_LONG) { enregCount++; // on 32-bit targets longs use two registers } #endif } // Set the cut off values to use for deciding when we want to use aggressive, moderate or conservative // // The value of aggressiveRefCnt and moderateRefCnt start off as zero and // when enregCount reached a certain value we assign the current LclVar // (weighted) ref count to aggressiveRefCnt or moderateRefCnt. 
// const unsigned aggressiveEnregNum = (CNT_CALLEE_ENREG * 3 / 2); const unsigned moderateEnregNum = ((CNT_CALLEE_ENREG * 3) + (CNT_CALLEE_TRASH * 2)); // // On Windows x64 this yeilds: // aggressiveEnregNum == 12 and moderateEnregNum == 38 // Thus we will typically set the cutoff values for // aggressiveRefCnt based upon the weight of T13 (the 13th tracked LclVar) // moderateRefCnt based upon the weight of T39 (the 39th tracked LclVar) // // For other architecture and platforms these values dynamically change // based upon the number of callee saved and callee scratch registers. // if ((aggressiveRefCnt == 0) && (enregCount > aggressiveEnregNum)) { if (CodeOptKind() == Compiler::SMALL_CODE) { aggressiveRefCnt = varDsc->lvRefCnt(); } else { aggressiveRefCnt = varDsc->lvRefCntWtd(); } aggressiveRefCnt += BB_UNITY_WEIGHT; } if ((moderateRefCnt == 0) && (enregCount > ((CNT_CALLEE_ENREG * 3) + (CNT_CALLEE_TRASH * 2)))) { if (CodeOptKind() == Compiler::SMALL_CODE) { moderateRefCnt = varDsc->lvRefCnt(); } else { moderateRefCnt = varDsc->lvRefCntWtd(); } moderateRefCnt += (BB_UNITY_WEIGHT / 2); } } // The minumum value that we want to use for aggressiveRefCnt is BB_UNITY_WEIGHT * 2 // so increase it when we are below that value // aggressiveRefCnt = max(BB_UNITY_WEIGHT * 2, aggressiveRefCnt); // The minumum value that we want to use for moderateRefCnt is BB_UNITY_WEIGHT // so increase it when we are below that value // moderateRefCnt = max(BB_UNITY_WEIGHT, moderateRefCnt); #ifdef DEBUG if (m_pCompiler->verbose) { printf("\n"); printf("Aggressive CSE Promotion cutoff is %f\n", aggressiveRefCnt); printf("Moderate CSE Promotion cutoff is %f\n", moderateRefCnt); printf("enregCount is %u\n", enregCount); printf("Framesize estimate is 0x%04X\n", frameSize); printf("We have a %s frame\n", hugeFrame ? "huge" : (largeFrame ? "large" : "small")); } #endif } void SortCandidates() { /* Create an expression table sorted by decreasing cost */ sortTab = new (m_pCompiler, CMK_CSE) Compiler::CSEdsc*[m_pCompiler->optCSECandidateCount]; sortSiz = m_pCompiler->optCSECandidateCount * sizeof(*sortTab); memcpy(sortTab, m_pCompiler->optCSEtab, sortSiz); if (CodeOptKind() == Compiler::SMALL_CODE) { jitstd::sort(sortTab, sortTab + m_pCompiler->optCSECandidateCount, Compiler::optCSEcostCmpSz()); } else { jitstd::sort(sortTab, sortTab + m_pCompiler->optCSECandidateCount, Compiler::optCSEcostCmpEx()); } #ifdef DEBUG if (m_pCompiler->verbose) { printf("\nSorted CSE candidates:\n"); /* Print out the CSE candidates */ for (unsigned cnt = 0; cnt < m_pCompiler->optCSECandidateCount; cnt++) { Compiler::CSEdsc* dsc = sortTab[cnt]; GenTree* expr = dsc->csdTree; weight_t def; weight_t use; unsigned cost; if (CodeOptKind() == Compiler::SMALL_CODE) { def = dsc->csdDefCount; // def count use = dsc->csdUseCount; // use count (excluding the implicit uses at defs) cost = dsc->csdTree->GetCostSz(); } else { def = dsc->csdDefWtCnt; // weighted def count use = dsc->csdUseWtCnt; // weighted use count (excluding the implicit uses at defs) cost = dsc->csdTree->GetCostEx(); } if (!Compiler::Is_Shared_Const_CSE(dsc->csdHashKey)) { printf(FMT_CSE ", {$%-3x, $%-3x} useCnt=%d: [def=%3f, use=%3f, cost=%3u%s]\n :: ", dsc->csdIndex, dsc->csdHashKey, dsc->defExcSetPromise, dsc->csdUseCount, def, use, cost, dsc->csdLiveAcrossCall ? 
", call" : " "); } else { size_t kVal = Compiler::Decode_Shared_Const_CSE_Value(dsc->csdHashKey); printf(FMT_CSE ", {K_%p} useCnt=%d: [def=%3f, use=%3f, cost=%3u%s]\n :: ", dsc->csdIndex, dspPtr(kVal), dsc->csdUseCount, def, use, cost, dsc->csdLiveAcrossCall ? ", call" : " "); } m_pCompiler->gtDispTree(expr, nullptr, nullptr, true); } printf("\n"); } #endif // DEBUG } // The following class nested within CSE_Heuristic encapsulates the information // about the current CSE candidate that is under consideration // // TODO-Cleanup: This is still very much based upon the old Lexical CSE implementation // and needs to be reworked for the Value Number based implementation // class CSE_Candidate { CSE_Heuristic* m_context; Compiler::CSEdsc* m_CseDsc; unsigned m_cseIndex; weight_t m_defCount; weight_t m_useCount; unsigned m_Cost; unsigned m_Size; // When this Candidate is successfully promoted to a CSE we record // the following information about what category was used when promoting it. // // We will set m_Aggressive: // When we believe that the CSE very valuable in terms of weighted ref counts, // such that it would always be enregistered by the register allocator. // // We will set m_Moderate: // When we believe that the CSE is moderately valuable in terms of weighted ref counts, // such that it is more likely than not to be enregistered by the register allocator // // We will set m_Conservative: // When we didn't set m_Aggressive or m_Moderate. // Such candidates typically are expensive to compute and thus are // always profitable to promote even when they aren't enregistered. // // We will set m_StressCSE: // When the candidate is only being promoted because of a Stress mode. // bool m_Aggressive; bool m_Moderate; bool m_Conservative; bool m_StressCSE; public: CSE_Candidate(CSE_Heuristic* context, Compiler::CSEdsc* cseDsc) : m_context(context) , m_CseDsc(cseDsc) , m_cseIndex(m_CseDsc->csdIndex) , m_defCount(0) , m_useCount(0) , m_Cost(0) , m_Size(0) , m_Aggressive(false) , m_Moderate(false) , m_Conservative(false) , m_StressCSE(false) { } Compiler::CSEdsc* CseDsc() { return m_CseDsc; } unsigned CseIndex() { return m_cseIndex; } weight_t DefCount() { return m_defCount; } weight_t UseCount() { return m_useCount; } // TODO-CQ: With ValNum CSE's the Expr and its cost can vary. 
GenTree* Expr() { return m_CseDsc->csdTree; } unsigned Cost() { return m_Cost; } unsigned Size() { return m_Size; } bool IsSharedConst() { return m_CseDsc->csdIsSharedConst; } bool LiveAcrossCall() { return m_CseDsc->csdLiveAcrossCall; } void SetAggressive() { m_Aggressive = true; } bool IsAggressive() { return m_Aggressive; } void SetModerate() { m_Moderate = true; } bool IsModerate() { return m_Moderate; } void SetConservative() { m_Conservative = true; } bool IsConservative() { return m_Conservative; } void SetStressCSE() { m_StressCSE = true; } bool IsStressCSE() { return m_StressCSE; } void InitializeCounts() { m_Size = Expr()->GetCostSz(); // always the GetCostSz() if (m_context->CodeOptKind() == Compiler::SMALL_CODE) { m_Cost = m_Size; // the estimated code size m_defCount = m_CseDsc->csdDefCount; // def count m_useCount = m_CseDsc->csdUseCount; // use count (excluding the implicit uses at defs) } else { m_Cost = Expr()->GetCostEx(); // the estimated execution cost m_defCount = m_CseDsc->csdDefWtCnt; // weighted def count m_useCount = m_CseDsc->csdUseWtCnt; // weighted use count (excluding the implicit uses at defs) } } }; #ifdef DEBUG //------------------------------------------------------------------------ // optConfigBiasedCSE: // Stress mode to shuffle the decision to CSE or not using environment // variable COMPlus_JitStressBiasedCSE (= 0 to 100%). When the bias value // is not specified but COMPlus_JitStress is ON, generate a random bias. // // Return Value: // 0 -- This method is indifferent about this CSE (no bias specified and no stress) // 1 -- This CSE must be performed to maintain specified/generated bias. // -1 -- This CSE mustn't be performed to maintain specified/generated bias. // // Operation: // A debug stress only method that returns "1" with probability (P) // defined by: // // P = (COMPlus_JitStressBiasedCSE / 100) (or) // P = (random(100) / 100) when COMPlus_JitStress is specified and // COMPlus_JitStressBiasedCSE is unspecified. // // When specified, the bias is reinterpreted as a decimal number between 0 // to 100. // When bias is not specified, a bias is randomly generated if COMPlus_JitStress // is non-zero. // // Callers are supposed to call this method for each CSE promotion decision // and ignore the call if return value is 0 and honor the 1 with a CSE and // -1 with a no-CSE to maintain the specified/generated bias. // int optConfigBiasedCSE() { // Seed the PRNG, if never done before. if (!m_cseRNG.IsInitialized()) { m_cseRNG.Init(m_pCompiler->info.compMethodHash()); m_bias = m_cseRNG.Next(100); } // Obtain the bias value and reinterpret as decimal. unsigned bias = ReinterpretHexAsDecimal(JitConfig.JitStressBiasedCSE()); // Invalid value, check if JitStress is ON. if (bias > 100) { if (!m_pCompiler->compStressCompile(Compiler::STRESS_MAKE_CSE, MAX_STRESS_WEIGHT)) { // JitStress is OFF for CSE, nothing to do. return 0; } bias = m_bias; JITDUMP("JitStressBiasedCSE is OFF, but JitStress is ON: generated bias=%d.\n", bias); } // Generate a number between (0, 99) and if the generated // number is smaller than bias, then perform CSE. unsigned gen = m_cseRNG.Next(100); int ret = (gen < bias) ? 1 : -1; if (m_pCompiler->verbose) { if (ret < 0) { printf("No CSE because gen=%d >= bias=%d\n", gen, bias); } else { printf("Promoting CSE because gen=%d < bias=%d\n", gen, bias); } } // Indicate whether to perform CSE or not. 
return ret; } #endif // Given a CSE candidate decide whether it passes or fails the profitability heuristic // return true if we believe that it is profitable to promote this candidate to a CSE // bool PromotionCheck(CSE_Candidate* candidate) { bool result = false; #ifdef DEBUG int stressResult = optConfigBiasedCSE(); if (stressResult != 0) { // Stress is enabled. Check whether to perform CSE or not. if (stressResult > 0) { candidate->SetStressCSE(); return true; } } if (m_pCompiler->optConfigDisableCSE2()) { return false; // skip this CSE } #endif /* Our calculation is based on the following cost estimate formula Existing costs are: (def + use) * cost If we introduce a CSE temp are each definition and replace the use with a CSE temp then our cost is: (def * (cost + cse-def-cost)) + (use * cse-use-cost) We must estimate the values to use for cse-def-cost and cse-use-cost If we are able to enregister the CSE then the cse-use-cost is one and cse-def-cost is either zero or one. Zero in the case where we needed to evaluate the def into a register and we can use that register as the CSE temp as well. If we are unable to enregister the CSE then the cse-use-cost is IND_COST and the cse-def-cost is also IND_COST. If we want to be conservative we use IND_COST as the the value for both cse-def-cost and cse-use-cost and then we never introduce a CSE that could pessimize the execution time of the method. If we want to be more moderate we use (IND_COST_EX + 1) / 2 as the values for both cse-def-cost and cse-use-cost. If we want to be aggressive we use 1 as the values for both cse-def-cost and cse-use-cost. If we believe that the CSE very valuable in terms of weighted ref counts such that it would always be enregistered by the register allocator we choose the aggressive use def costs. If we believe that the CSE is somewhat valuable in terms of weighted ref counts such that it could be likely be enregistered by the register allocator we choose the moderate use def costs. otherwise we choose the conservative use def costs. */ unsigned cse_def_cost; unsigned cse_use_cost; weight_t no_cse_cost = 0; weight_t yes_cse_cost = 0; unsigned extra_yes_cost = 0; unsigned extra_no_cost = 0; // The 'cseRefCnt' is the RefCnt that we will have if we promote this CSE into a new LclVar // Each CSE Def will contain two Refs and each CSE Use will have one Ref of this new LclVar weight_t cseRefCnt = (candidate->DefCount() * 2) + candidate->UseCount(); bool canEnregister = true; unsigned slotCount = 1; if (candidate->Expr()->TypeIs(TYP_STRUCT)) { // This is a non-enregisterable struct. canEnregister = false; CORINFO_CLASS_HANDLE structHnd = m_pCompiler->gtGetStructHandleIfPresent(candidate->Expr()); if (structHnd == NO_CLASS_HANDLE) { JITDUMP("Can't determine the struct size, so we can't consider it for CSE promotion\n"); return false; // Do not make this a CSE } unsigned size = m_pCompiler->info.compCompHnd->getClassSize(structHnd); // Note that the slotCount is used to estimate the reference cost, but it may overestimate this // because it doesn't take into account that we might use a vector register for struct copies. slotCount = (size + TARGET_POINTER_SIZE - 1) / TARGET_POINTER_SIZE; } if (CodeOptKind() == Compiler::SMALL_CODE) { // Note that when optimizing for SMALL_CODE we set the cse_def_cost/cse_use_cost based // upon the code size and we use unweighted ref counts instead of weighted ref counts. 
// Also note that optimizing for SMALL_CODE is rare, we typically only optimize this way // for class constructors, because we know that they will only run once. // if (cseRefCnt >= aggressiveRefCnt) { // Record that we are choosing to use the aggressive promotion rules // candidate->SetAggressive(); #ifdef DEBUG if (m_pCompiler->verbose) { printf("Aggressive CSE Promotion (%f >= %f)\n", cseRefCnt, aggressiveRefCnt); } #endif // With aggressive promotion we expect that the candidate will be enregistered // so we set the use and def costs to their miniumum values // cse_def_cost = 1; cse_use_cost = 1; // Check if this candidate is likely to live on the stack // if (candidate->LiveAcrossCall() || !canEnregister) { // Increase the costs when we have a large or huge frame // if (largeFrame) { cse_def_cost++; cse_use_cost++; } if (hugeFrame) { cse_def_cost++; cse_use_cost++; } } } else // not aggressiveRefCnt { // Record that we are choosing to use the conservative promotion rules // candidate->SetConservative(); if (largeFrame) { #ifdef DEBUG if (m_pCompiler->verbose) { printf("Codesize CSE Promotion (%s frame)\n", hugeFrame ? "huge" : "large"); } #endif #ifdef TARGET_XARCH /* The following formula is good choice when optimizing CSE for SMALL_CODE */ cse_def_cost = 6; // mov [EBP-0x00001FC],reg cse_use_cost = 5; // [EBP-0x00001FC] #else // TARGET_ARM if (hugeFrame) { cse_def_cost = 10 + 2; // movw/movt r10 and str reg,[sp+r10] cse_use_cost = 10 + 2; } else { cse_def_cost = 6 + 2; // movw r10 and str reg,[sp+r10] cse_use_cost = 6 + 2; } #endif } else // small frame { #ifdef DEBUG if (m_pCompiler->verbose) { printf("Codesize CSE Promotion (small frame)\n"); } #endif #ifdef TARGET_XARCH /* The following formula is good choice when optimizing CSE for SMALL_CODE */ cse_def_cost = 3; // mov [EBP-1C],reg cse_use_cost = 2; // [EBP-1C] #else // TARGET_ARM cse_def_cost = 2; // str reg,[sp+0x9c] cse_use_cost = 2; // ldr reg,[sp+0x9c] #endif } } #ifdef TARGET_AMD64 if (varTypeIsFloating(candidate->Expr()->TypeGet())) { // floating point loads/store encode larger cse_def_cost += 2; cse_use_cost += 1; } #endif // TARGET_AMD64 } else // not SMALL_CODE ... { // Note that when optimizing for BLENDED_CODE or FAST_CODE we set cse_def_cost/cse_use_cost // based upon the execution costs of the code and we use weighted ref counts. // if ((cseRefCnt >= aggressiveRefCnt) && canEnregister) { // Record that we are choosing to use the aggressive promotion rules // candidate->SetAggressive(); #ifdef DEBUG if (m_pCompiler->verbose) { printf("Aggressive CSE Promotion (%f >= %f)\n", cseRefCnt, aggressiveRefCnt); } #endif // With aggressive promotion we expect that the candidate will be enregistered // so we set the use and def costs to their miniumum values // cse_def_cost = 1; cse_use_cost = 1; } else if (cseRefCnt >= moderateRefCnt) { // Record that we are choosing to use the moderate promotion rules // candidate->SetModerate(); if (!candidate->LiveAcrossCall() && canEnregister) { #ifdef DEBUG if (m_pCompiler->verbose) { printf("Moderate CSE Promotion (CSE never live at call) (%f >= %f)\n", cseRefCnt, moderateRefCnt); } #endif cse_def_cost = 2; cse_use_cost = 1; } else // candidate is live across call or not enregisterable. { #ifdef DEBUG if (m_pCompiler->verbose) { printf("Moderate CSE Promotion (%s) (%f >= %f)\n", candidate->LiveAcrossCall() ? 
"CSE is live across a call" : "not enregisterable", cseRefCnt, moderateRefCnt); } #endif cse_def_cost = 2; if (canEnregister) { if (enregCount < (CNT_CALLEE_ENREG * 3 / 2)) { cse_use_cost = 1; } else { cse_use_cost = 2; } } else { cse_use_cost = 3; } } } else // Conservative CSE promotion { // Record that we are choosing to use the conservative promotion rules // candidate->SetConservative(); if (!candidate->LiveAcrossCall() && canEnregister) { #ifdef DEBUG if (m_pCompiler->verbose) { printf("Conservative CSE Promotion (%s) (%f < %f)\n", candidate->LiveAcrossCall() ? "CSE is live across a call" : "not enregisterable", cseRefCnt, moderateRefCnt); } #endif cse_def_cost = 2; cse_use_cost = 2; } else // candidate is live across call { #ifdef DEBUG if (m_pCompiler->verbose) { printf("Conservative CSE Promotion (%f < %f)\n", cseRefCnt, moderateRefCnt); } #endif cse_def_cost = 2; cse_use_cost = 3; } // If we have maxed out lvaTrackedCount then this CSE may end up as an untracked variable if (m_pCompiler->lvaTrackedCount == (unsigned)JitConfig.JitMaxLocalsToTrack()) { cse_def_cost += 1; cse_use_cost += 1; } } } if (slotCount > 1) { cse_def_cost *= slotCount; cse_use_cost *= slotCount; } // If this CSE is live across a call then we may have additional costs // if (candidate->LiveAcrossCall()) { // If we have a floating-point CSE that is both live across a call and there // are no callee-saved FP registers available, the RA will have to spill at // the def site and reload at the (first) use site, if the variable is a register // candidate. Account for that. if (varTypeIsFloating(candidate->Expr()) && (CNT_CALLEE_SAVED_FLOAT == 0) && !candidate->IsConservative()) { cse_def_cost += 1; cse_use_cost += 1; } // If we don't have a lot of variables to enregister or we have a floating point type // then we will likely need to spill an additional caller save register. // if ((enregCount < (CNT_CALLEE_ENREG * 3 / 2)) || varTypeIsFloating(candidate->Expr())) { // Extra cost in case we have to spill/restore a caller saved register extra_yes_cost = BB_UNITY_WEIGHT_UNSIGNED; if (cseRefCnt < moderateRefCnt) // If Conservative CSE promotion { extra_yes_cost *= 2; // full cost if we are being Conservative } } #ifdef FEATURE_SIMD // SIMD types may cause a SIMD register to be spilled/restored in the prolog and epilog. // if (varTypeIsSIMD(candidate->Expr()->TypeGet())) { // We don't have complete information about when these extra spilled/restore will be needed. // Instead we are conservative and assume that each SIMD CSE that is live across a call // will cause an additional spill/restore in the prolog and epilog. // int spillSimdRegInProlog = 1; // If we have a SIMD32 that is live across a call we have even higher spill costs // if (candidate->Expr()->TypeGet() == TYP_SIMD32) { // Additionally for a simd32 CSE candidate we assume that and second spilled/restore will be needed. // (to hold the upper half of the simd32 register that isn't preserved across the call) // spillSimdRegInProlog++; // We also increase the CSE use cost here to because we may have to generate instructions // to move the upper half of the simd32 before and after a call. 
// cse_use_cost += 2; } extra_yes_cost = (BB_UNITY_WEIGHT_UNSIGNED * spillSimdRegInProlog) * 3; } #endif // FEATURE_SIMD } // estimate the cost from lost codesize reduction if we do not perform the CSE if (candidate->Size() > cse_use_cost) { Compiler::CSEdsc* dsc = candidate->CseDsc(); // We need to retrieve the actual use count, not the // weighted count extra_no_cost = candidate->Size() - cse_use_cost; extra_no_cost = extra_no_cost * dsc->csdUseCount * 2; } /* no_cse_cost is the cost estimate when we decide not to make a CSE */ /* yes_cse_cost is the cost estimate when we decide to make a CSE */ no_cse_cost = candidate->UseCount() * candidate->Cost(); yes_cse_cost = (candidate->DefCount() * cse_def_cost) + (candidate->UseCount() * cse_use_cost); no_cse_cost += extra_no_cost; yes_cse_cost += extra_yes_cost; #ifdef DEBUG if (m_pCompiler->verbose) { printf("cseRefCnt=%f, aggressiveRefCnt=%f, moderateRefCnt=%f\n", cseRefCnt, aggressiveRefCnt, moderateRefCnt); printf("defCnt=%f, useCnt=%f, cost=%d, size=%d%s\n", candidate->DefCount(), candidate->UseCount(), candidate->Cost(), candidate->Size(), candidate->LiveAcrossCall() ? ", LiveAcrossCall" : ""); printf("def_cost=%d, use_cost=%d, extra_no_cost=%d, extra_yes_cost=%d\n", cse_def_cost, cse_use_cost, extra_no_cost, extra_yes_cost); printf("CSE cost savings check (%f >= %f) %s\n", no_cse_cost, yes_cse_cost, (no_cse_cost >= yes_cse_cost) ? "passes" : "fails"); } #endif // DEBUG // Should we make this candidate into a CSE? // Is the yes cost less than the no cost // if (yes_cse_cost <= no_cse_cost) { result = true; // Yes make this a CSE } else { /* In stress mode we will make some extra CSEs */ if (no_cse_cost > 0) { int percentage = (int)((no_cse_cost * 100) / yes_cse_cost); if (m_pCompiler->compStressCompile(Compiler::STRESS_MAKE_CSE, percentage)) { result = true; // Yes make this a CSE } } } return result; } // IsCompatibleType() takes two var_types and returns true if they // are compatible types for CSE substitution // bool IsCompatibleType(var_types cseLclVarTyp, var_types expTyp) { // Exact type match is the expected case if (cseLclVarTyp == expTyp) { return true; } // We also allow TYP_BYREF and TYP_I_IMPL as compatible types // if ((cseLclVarTyp == TYP_BYREF) && (expTyp == TYP_I_IMPL)) { return true; } if ((cseLclVarTyp == TYP_I_IMPL) && (expTyp == TYP_BYREF)) { return true; } // Otherwise we have incompatible types return false; } // PerformCSE() takes a successful candidate and performs the appropriate replacements: // // It will replace all of the CSE defs with assignments to a new "cse0" LclVar // and will replace all of the CSE uses with reads of the "cse0" LclVar // // It will also put cse0 into SSA if there is just one def. 
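    // Roughly, the rewrites performed below have the following shape (illustrative sketch that
    // ignores the shared-constant and zero-offset field sequence variations):
    //
    //    CSE def:                         exp  ==>  COMMA(ASG(cseN, exp), cseN)
    //    CSE use:                         exp  ==>  cseN
    //    CSE use with extracted effects:  exp  ==>  COMMA(sideEffList, cseN)
    //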
void PerformCSE(CSE_Candidate* successfulCandidate) { weight_t cseRefCnt = (successfulCandidate->DefCount() * 2) + successfulCandidate->UseCount(); if (successfulCandidate->LiveAcrossCall() != 0) { // As we introduce new LclVars for these CSE we slightly // increase the cutoffs for aggressive and moderate CSE's // weight_t incr = BB_UNITY_WEIGHT; if (cseRefCnt > aggressiveRefCnt) { aggressiveRefCnt += incr; } if (cseRefCnt > moderateRefCnt) { moderateRefCnt += (incr / 2); } } #ifdef DEBUG // Setup the message arg for lvaGrabTemp() // const char* grabTempMessage = "CSE - unknown"; if (successfulCandidate->IsAggressive()) { grabTempMessage = "CSE - aggressive"; } else if (successfulCandidate->IsModerate()) { grabTempMessage = "CSE - moderate"; } else if (successfulCandidate->IsConservative()) { grabTempMessage = "CSE - conservative"; } else if (successfulCandidate->IsStressCSE()) { grabTempMessage = "CSE - stress mode"; } #endif // DEBUG /* Introduce a new temp for the CSE */ // we will create a long lifetime temp for the new CSE LclVar unsigned cseLclVarNum = m_pCompiler->lvaGrabTemp(false DEBUGARG(grabTempMessage)); var_types cseLclVarTyp = genActualType(successfulCandidate->Expr()->TypeGet()); if (varTypeIsStruct(cseLclVarTyp)) { // Retrieve the struct handle that we recorded while bulding the list of CSE candidates. // If all occurrences were in GT_IND nodes it could still be NO_CLASS_HANDLE // CORINFO_CLASS_HANDLE structHnd = successfulCandidate->CseDsc()->csdStructHnd; if (structHnd == NO_CLASS_HANDLE) { assert(varTypeIsSIMD(cseLclVarTyp)); // We are not setting it for `SIMD* indir` during the first path // because it is not precise, see `optValnumCSE_Index`. structHnd = m_pCompiler->gtGetStructHandle(successfulCandidate->CseDsc()->csdTree); } assert(structHnd != NO_CLASS_HANDLE); m_pCompiler->lvaSetStruct(cseLclVarNum, structHnd, false); } m_pCompiler->lvaTable[cseLclVarNum].lvType = cseLclVarTyp; m_pCompiler->lvaTable[cseLclVarNum].lvIsCSE = true; // Record that we created a new LclVar for use as a CSE temp m_addCSEcount++; m_pCompiler->optCSEcount++; // Walk all references to this CSE, adding an assignment // to the CSE temp to all defs and changing all refs to // a simple use of the CSE temp. // // Later we will unmark any nested CSE's for the CSE uses. // Compiler::CSEdsc* dsc = successfulCandidate->CseDsc(); // If there's just a single def for the CSE, we'll put this // CSE into SSA form on the fly. We won't need any PHIs. unsigned cseSsaNum = SsaConfig::RESERVED_SSA_NUM; if (dsc->csdDefCount == 1) { JITDUMP(FMT_CSE " is single-def, so associated CSE temp V%02u will be in SSA\n", dsc->csdIndex, cseLclVarNum); m_pCompiler->lvaTable[cseLclVarNum].lvInSsa = true; // Allocate the ssa num CompAllocator allocator = m_pCompiler->getAllocator(CMK_SSA); cseSsaNum = m_pCompiler->lvaTable[cseLclVarNum].lvPerSsaData.AllocSsaNum(allocator); } // Verify that all of the ValueNumbers in this list are correct as // Morph will change them when it performs a mutating operation. 
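        // (Added note: for a shared-constant CSE the walk below also picks the "base" constant and
        //  value number; occurrences whose constant differs from the base are later rewritten as
        //  ADD(cseTemp, delta), so the base is steered toward lower constants to keep those deltas
        //  small and addressing-mode friendly.)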
// bool setRefCnt = true; bool allSame = true; bool isSharedConst = successfulCandidate->IsSharedConst(); ValueNum bestVN = ValueNumStore::NoVN; bool bestIsDef = false; ssize_t bestConstValue = 0; Compiler::treeStmtLst* lst = dsc->csdTreeList; while (lst != nullptr) { // Ignore this node if the gtCSEnum value has been cleared if (IS_CSE_INDEX(lst->tslTree->gtCSEnum)) { // We used the liberal Value numbers when building the set of CSE ValueNum currVN = m_pCompiler->vnStore->VNLiberalNormalValue(lst->tslTree->gtVNPair); assert(currVN != ValueNumStore::NoVN); ssize_t curConstValue = isSharedConst ? m_pCompiler->vnStore->CoercedConstantValue<ssize_t>(currVN) : 0; GenTree* exp = lst->tslTree; bool isDef = IS_CSE_DEF(exp->gtCSEnum); if (bestVN == ValueNumStore::NoVN) { // first entry // set bestVN bestVN = currVN; if (isSharedConst) { // set bestConstValue and bestIsDef bestConstValue = curConstValue; bestIsDef = isDef; } } else if (currVN != bestVN) { assert(isSharedConst); // Must be true when we have differing VNs // subsequent entry // clear allSame and check for a lower constant allSame = false; ssize_t diff = curConstValue - bestConstValue; // The ARM addressing modes allow for a subtraction of up to 255 // so we will allow the diff to be up to -255 before replacing a CSE def // This will minimize the number of extra subtract instructions. // if ((bestIsDef && (diff < -255)) || (!bestIsDef && (diff < 0))) { // set new bestVN, bestConstValue and bestIsDef bestVN = currVN; bestConstValue = curConstValue; bestIsDef = isDef; } } BasicBlock* blk = lst->tslBlock; weight_t curWeight = blk->getBBWeight(m_pCompiler); if (setRefCnt) { m_pCompiler->lvaTable[cseLclVarNum].setLvRefCnt(1); m_pCompiler->lvaTable[cseLclVarNum].setLvRefCntWtd(curWeight); setRefCnt = false; } else { m_pCompiler->lvaTable[cseLclVarNum].incRefCnts(curWeight, m_pCompiler); } // A CSE Def references the LclVar twice // if (isDef) { m_pCompiler->lvaTable[cseLclVarNum].incRefCnts(curWeight, m_pCompiler); } } lst = lst->tslNext; } dsc->csdConstDefValue = bestConstValue; dsc->csdConstDefVN = bestVN; #ifdef DEBUG if (m_pCompiler->verbose) { if (!allSame) { if (isSharedConst) { printf("\nWe have shared Const CSE's and selected " FMT_VN " with a value of 0x%p as the base.\n", dsc->csdConstDefVN, dspPtr(dsc->csdConstDefValue)); } else // !isSharedConst { lst = dsc->csdTreeList; GenTree* firstTree = lst->tslTree; printf("In %s, CSE (oper = %s, type = %s) has differing VNs: ", m_pCompiler->info.compFullName, GenTree::OpName(firstTree->OperGet()), varTypeName(firstTree->TypeGet())); while (lst != nullptr) { if (IS_CSE_INDEX(lst->tslTree->gtCSEnum)) { ValueNum currVN = m_pCompiler->vnStore->VNLiberalNormalValue(lst->tslTree->gtVNPair); printf("[%06d](%s " FMT_VN ") ", m_pCompiler->dspTreeID(lst->tslTree), IS_CSE_USE(lst->tslTree->gtCSEnum) ? "use" : "def", currVN); } lst = lst->tslNext; } printf("\n"); } } } #endif // DEBUG // Setup 'lst' to point at the start of this candidate list lst = dsc->csdTreeList; noway_assert(lst); do { /* Process the next node in the list */ GenTree* exp = lst->tslTree; Statement* stmt = lst->tslStmt; BasicBlock* blk = lst->tslBlock; /* Advance to the next node in the list */ lst = lst->tslNext; // We may have cleared this CSE in optValuenumCSE_Availablity // due to different exception sets. 
// // Ignore this node if the gtCSEnum value has been cleared if (!IS_CSE_INDEX(exp->gtCSEnum)) { continue; } // Assert if we used DEBUG_DESTROY_NODE on this CSE exp assert(exp->gtOper != GT_COUNT); /* Make sure we update the weighted ref count correctly */ m_pCompiler->optCSEweight = blk->getBBWeight(m_pCompiler); /* Figure out the actual type of the value */ var_types expTyp = genActualType(exp->TypeGet()); // The cseLclVarType must be a compatible with expTyp // ValueNumStore* vnStore = m_pCompiler->vnStore; noway_assert(IsCompatibleType(cseLclVarTyp, expTyp) || (dsc->csdConstDefVN != vnStore->VNForNull())); // This will contain the replacement tree for exp // It will either be the CSE def or CSE ref // GenTree* cse = nullptr; bool isDef; FieldSeqNode* fldSeq = nullptr; bool commaOnly = true; GenTree* effectiveExp = exp->gtEffectiveVal(commaOnly); const bool hasZeroMapAnnotation = m_pCompiler->GetZeroOffsetFieldMap()->Lookup(effectiveExp, &fldSeq); if (IS_CSE_USE(exp->gtCSEnum)) { /* This is a use of the CSE */ isDef = false; #ifdef DEBUG if (m_pCompiler->verbose) { printf("\nWorking on the replacement of the " FMT_CSE " use at ", exp->gtCSEnum); Compiler::printTreeID(exp); printf(" in " FMT_BB "\n", blk->bbNum); } #endif // DEBUG // We will replace the CSE ref with a new tree // this is typically just a simple use of the new CSE LclVar // // Create a reference to the CSE temp GenTree* cseLclVar = m_pCompiler->gtNewLclvNode(cseLclVarNum, cseLclVarTyp); cseLclVar->gtVNPair.SetBoth(dsc->csdConstDefVN); // Assign the ssa num for the lclvar use. Note it may be the reserved num. cseLclVar->AsLclVarCommon()->SetSsaNum(cseSsaNum); cse = cseLclVar; if (isSharedConst) { ValueNum currVN = m_pCompiler->vnStore->VNLiberalNormalValue(exp->gtVNPair); ssize_t curValue = m_pCompiler->vnStore->CoercedConstantValue<ssize_t>(currVN); ssize_t delta = curValue - dsc->csdConstDefValue; if (delta != 0) { GenTree* deltaNode = m_pCompiler->gtNewIconNode(delta, cseLclVarTyp); cse = m_pCompiler->gtNewOperNode(GT_ADD, cseLclVarTyp, cseLclVar, deltaNode); cse->SetDoNotCSE(); } } // assign the proper ValueNumber, A CSE use discards any exceptions cse->gtVNPair = vnStore->VNPNormalPair(exp->gtVNPair); // shared const CSE has the correct value number assigned // and both liberal and conservative are identical // and they do not use theConservativeVN // if (!isSharedConst) { ValueNum theConservativeVN = successfulCandidate->CseDsc()->defConservNormVN; if (theConservativeVN != ValueNumStore::NoVN) { // All defs of this CSE share the same normal conservative VN, and we are rewriting this // use to fetch the same value with no reload, so we can safely propagate that // conservative VN to this use. This can help range check elimination later on. cse->gtVNPair.SetConservative(theConservativeVN); // If the old VN was flagged as a checked bound, propagate that to the new VN // to make sure assertion prop will pay attention to this VN. ValueNum oldVN = exp->gtVNPair.GetConservative(); if (!vnStore->IsVNConstant(theConservativeVN) && vnStore->IsVNCheckedBound(oldVN)) { vnStore->SetVNIsCheckedBound(theConservativeVN); } GenTree* cmp; if ((m_pCompiler->optCseCheckedBoundMap != nullptr) && (m_pCompiler->optCseCheckedBoundMap->Lookup(exp, &cmp))) { // Propagate the new value number to this compare node as well, since // subsequent range check elimination will try to correlate it with // the other appearances that are getting CSEd. 
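                        // (Illustrative example, added: if "a.Length" is the CSE and a compare such as
                        //  "i < a.Length" was recorded in optCseCheckedBoundMap, the compare's conservative
                        //  VN is rebuilt below in terms of the CSE def's VN so that later range check
                        //  elimination still recognizes the bound.)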
ValueNum oldCmpVN = cmp->gtVNPair.GetConservative(); ValueNum newCmpArgVN; ValueNumStore::CompareCheckedBoundArithInfo info; if (vnStore->IsVNCompareCheckedBound(oldCmpVN)) { // Comparison is against the bound directly. newCmpArgVN = theConservativeVN; vnStore->GetCompareCheckedBound(oldCmpVN, &info); } else { // Comparison is against the bound +/- some offset. assert(vnStore->IsVNCompareCheckedBoundArith(oldCmpVN)); vnStore->GetCompareCheckedBoundArithInfo(oldCmpVN, &info); newCmpArgVN = vnStore->VNForFunc(vnStore->TypeOfVN(info.arrOp), (VNFunc)info.arrOper, info.arrOp, theConservativeVN); } ValueNum newCmpVN = vnStore->VNForFunc(vnStore->TypeOfVN(oldCmpVN), (VNFunc)info.cmpOper, info.cmpOp, newCmpArgVN); cmp->gtVNPair.SetConservative(newCmpVN); } } } #ifdef DEBUG cse->gtDebugFlags |= GTF_DEBUG_VAR_CSE_REF; #endif // DEBUG // Now we need to unmark any nested CSE's uses that are found in 'exp' // As well we extract any nested CSE defs that are found in 'exp' and // these are appended to the sideEffList // Afterwards the set of nodes in the 'sideEffectList' are preserved and // all other nodes are removed. // exp->gtCSEnum = NO_CSE; // clear the gtCSEnum field GenTree* sideEffList = nullptr; m_pCompiler->gtExtractSideEffList(exp, &sideEffList, GTF_PERSISTENT_SIDE_EFFECTS | GTF_IS_IN_CSE); // If we have any side effects or extracted CSE defs then we need to create a GT_COMMA tree instead // if (sideEffList != nullptr) { #ifdef DEBUG if (m_pCompiler->verbose) { printf("\nThis CSE use has side effects and/or nested CSE defs. The sideEffectList:\n"); m_pCompiler->gtDispTree(sideEffList); printf("\n"); } #endif GenTree* cseVal = cse; GenTree* curSideEff = sideEffList; ValueNumPair exceptions_vnp = ValueNumStore::VNPForEmptyExcSet(); while ((curSideEff->OperGet() == GT_COMMA) || (curSideEff->OperGet() == GT_ASG)) { GenTree* op1 = curSideEff->AsOp()->gtOp1; GenTree* op2 = curSideEff->AsOp()->gtOp2; ValueNumPair op1vnp; ValueNumPair op1Xvnp = ValueNumStore::VNPForEmptyExcSet(); vnStore->VNPUnpackExc(op1->gtVNPair, &op1vnp, &op1Xvnp); exceptions_vnp = vnStore->VNPExcSetUnion(exceptions_vnp, op1Xvnp); curSideEff = op2; } // We may have inserted a narrowing cast during a previous remorph // and it will not have a value number. if ((curSideEff->OperGet() == GT_CAST) && !curSideEff->gtVNPair.BothDefined()) { // The inserted cast will have no exceptional effects assert(curSideEff->gtOverflow() == false); // Process the exception effects from the cast's operand. 
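                    // (Added note: stepping to the cast's operand below lets the exception-set unpacking
                    //  read a defined VN pair; the cast itself was inserted by a prior remorph and has no
                    //  value number of its own.)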
curSideEff = curSideEff->AsOp()->gtOp1; } ValueNumPair op2vnp; ValueNumPair op2Xvnp = ValueNumStore::VNPForEmptyExcSet(); vnStore->VNPUnpackExc(curSideEff->gtVNPair, &op2vnp, &op2Xvnp); exceptions_vnp = vnStore->VNPExcSetUnion(exceptions_vnp, op2Xvnp); op2Xvnp = ValueNumStore::VNPForEmptyExcSet(); vnStore->VNPUnpackExc(cseVal->gtVNPair, &op2vnp, &op2Xvnp); exceptions_vnp = vnStore->VNPExcSetUnion(exceptions_vnp, op2Xvnp); // Create a comma node with the sideEffList as op1 cse = m_pCompiler->gtNewOperNode(GT_COMMA, expTyp, sideEffList, cseVal); cse->gtVNPair = vnStore->VNPWithExc(op2vnp, exceptions_vnp); } } else { /* This is a def of the CSE */ isDef = true; #ifdef DEBUG if (m_pCompiler->verbose) { printf("\n" FMT_CSE " def at ", GET_CSE_INDEX(exp->gtCSEnum)); Compiler::printTreeID(exp); printf(" replaced in " FMT_BB " with def of V%02u\n", blk->bbNum, cseLclVarNum); } #endif // DEBUG GenTree* val = exp; if (isSharedConst) { ValueNum currVN = m_pCompiler->vnStore->VNLiberalNormalValue(exp->gtVNPair); ssize_t curValue = m_pCompiler->vnStore->CoercedConstantValue<ssize_t>(currVN); ssize_t delta = curValue - dsc->csdConstDefValue; if (delta != 0) { val = m_pCompiler->gtNewIconNode(dsc->csdConstDefValue, cseLclVarTyp); val->gtVNPair.SetBoth(dsc->csdConstDefVN); } } /* Create an assignment of the value to the temp */ GenTree* asg = m_pCompiler->gtNewTempAssign(cseLclVarNum, val); GenTree* origAsg = asg; if (!asg->OperIs(GT_ASG)) { // This can only be the case for a struct in which the 'val' was a COMMA, so // the assignment is sunk below it. asg = asg->gtEffectiveVal(true); noway_assert(origAsg->OperIs(GT_COMMA) && (origAsg == val)); } else { noway_assert(asg->AsOp()->gtOp2 == val); } // assign the proper Value Numbers asg->gtVNPair.SetBoth(ValueNumStore::VNForVoid()); // The GT_ASG node itself is $VN.Void asg->AsOp()->gtOp1->gtVNPair = val->gtVNPair; // The dest op is the same as 'val' noway_assert(asg->AsOp()->gtOp1->gtOper == GT_LCL_VAR); // Backpatch the SSA def, if we're putting this CSE temp into ssa. asg->AsOp()->gtOp1->AsLclVar()->SetSsaNum(cseSsaNum); // Move the information about the CSE def to the assignment; it // now indicates a completed CSE def instead of just a // candidate. optCSE_canSwap uses this information to reason // about evaluation order in between substitutions of CSE // defs/uses. asg->gtCSEnum = exp->gtCSEnum; exp->gtCSEnum = NO_CSE; if (cseSsaNum != SsaConfig::RESERVED_SSA_NUM) { LclSsaVarDsc* ssaVarDsc = m_pCompiler->lvaTable[cseLclVarNum].GetPerSsaData(cseSsaNum); // These should not have been set yet, since this is the first and // only def for this CSE. assert(ssaVarDsc->GetBlock() == nullptr); assert(ssaVarDsc->GetAssignment() == nullptr); ssaVarDsc->m_vnPair = val->gtVNPair; ssaVarDsc->SetBlock(blk); ssaVarDsc->SetAssignment(asg->AsOp()); } /* Create a reference to the CSE temp */ GenTree* cseLclVar = m_pCompiler->gtNewLclvNode(cseLclVarNum, cseLclVarTyp); cseLclVar->gtVNPair.SetBoth(dsc->csdConstDefVN); // Assign the ssa num for the lclvar use. Note it may be the reserved num. 
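                // (Added note: cseSsaNum is the reserved SSA number unless this CSE has exactly one def,
                //  in which case the temp was placed into SSA above and all of its defs and uses share
                //  that single SSA number.)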
cseLclVar->AsLclVarCommon()->SetSsaNum(cseSsaNum); GenTree* cseUse = cseLclVar; if (isSharedConst) { ValueNum currVN = m_pCompiler->vnStore->VNLiberalNormalValue(exp->gtVNPair); ssize_t curValue = m_pCompiler->vnStore->CoercedConstantValue<ssize_t>(currVN); ssize_t delta = curValue - dsc->csdConstDefValue; if (delta != 0) { GenTree* deltaNode = m_pCompiler->gtNewIconNode(delta, cseLclVarTyp); cseUse = m_pCompiler->gtNewOperNode(GT_ADD, cseLclVarTyp, cseLclVar, deltaNode); cseUse->SetDoNotCSE(); } } cseUse->gtVNPair = val->gtVNPair; // The 'cseUse' is equal to 'val' /* Create a comma node for the CSE assignment */ cse = m_pCompiler->gtNewOperNode(GT_COMMA, expTyp, origAsg, cseUse); cse->gtVNPair = cseUse->gtVNPair; // The comma's value is the same as 'val' // as the assignment to the CSE LclVar // cannot add any new exceptions } cse->CopyReg(exp); // The cse inheirits any reg num property from the orginal exp node exp->ClearRegNum(); // The exp node (for a CSE def) no longer has a register requirement // Walk the statement 'stmt' and find the pointer // in the tree is pointing to 'exp' // Compiler::FindLinkData linkData = m_pCompiler->gtFindLink(stmt, exp); GenTree** link = linkData.result; #ifdef DEBUG if (link == nullptr) { printf("\ngtFindLink failed: stm="); Compiler::printStmtID(stmt); printf(", exp="); Compiler::printTreeID(exp); printf("\n"); printf("stm ="); m_pCompiler->gtDispStmt(stmt); printf("\n"); printf("exp ="); m_pCompiler->gtDispTree(exp); printf("\n"); } #endif // DEBUG noway_assert(link); // Mutate this link, thus replacing the old exp with the new CSE representation // *link = cse; // If it has a zero-offset field seq, copy annotation. if (hasZeroMapAnnotation) { m_pCompiler->fgAddFieldSeqForZeroOffset(cse, fldSeq); } assert(m_pCompiler->fgRemoveRestOfBlock == false); /* re-morph the statement */ m_pCompiler->fgMorphBlockStmt(blk, stmt DEBUGARG("optValnumCSE")); } while (lst != nullptr); } // Consider each of the CSE candidates and if the CSE passes // the PromotionCheck then transform the CSE by calling PerformCSE // void ConsiderCandidates() { /* Consider each CSE candidate, in order of decreasing cost */ unsigned cnt = m_pCompiler->optCSECandidateCount; Compiler::CSEdsc** ptr = sortTab; for (; (cnt > 0); cnt--, ptr++) { Compiler::CSEdsc* dsc = *ptr; CSE_Candidate candidate(this, dsc); if (dsc->defExcSetPromise == ValueNumStore::NoVN) { JITDUMP("Abandoned " FMT_CSE " because we had defs with different Exc sets\n", candidate.CseIndex()); continue; } if (dsc->csdStructHndMismatch) { JITDUMP("Abandoned " FMT_CSE " because we had mismatching struct handles\n", candidate.CseIndex()); continue; } candidate.InitializeCounts(); if (candidate.UseCount() == 0) { JITDUMP("Skipped " FMT_CSE " because use count is 0\n", candidate.CseIndex()); continue; } #ifdef DEBUG if (m_pCompiler->verbose) { if (!Compiler::Is_Shared_Const_CSE(dsc->csdHashKey)) { printf("\nConsidering " FMT_CSE " {$%-3x, $%-3x} [def=%3f, use=%3f, cost=%3u%s]\n", candidate.CseIndex(), dsc->csdHashKey, dsc->defExcSetPromise, candidate.DefCount(), candidate.UseCount(), candidate.Cost(), dsc->csdLiveAcrossCall ? ", call" : " "); } else { size_t kVal = Compiler::Decode_Shared_Const_CSE_Value(dsc->csdHashKey); printf("\nConsidering " FMT_CSE " {K_%p} [def=%3f, use=%3f, cost=%3u%s]\n", candidate.CseIndex(), dspPtr(kVal), candidate.DefCount(), candidate.UseCount(), candidate.Cost(), dsc->csdLiveAcrossCall ? 
", call" : " "); } printf("CSE Expression : \n"); m_pCompiler->gtDispTree(candidate.Expr()); printf("\n"); } #endif // DEBUG if ((dsc->csdDefCount <= 0) || (dsc->csdUseCount == 0)) { // If we reach this point, then the CSE def was incorrectly marked or the // block with this use is unreachable. So skip and go to the next CSE. // Without the "continue", we'd generate bad code in retail. // Commented out a noway_assert(false) here due to bug: 3290124. // The problem is if there is sub-graph that is not reachable from the // entry point, the CSE flags propagated, would be incorrect for it. continue; } bool doCSE = PromotionCheck(&candidate); #ifdef DEBUG if (m_pCompiler->verbose) { if (doCSE) { printf("\nPromoting CSE:\n"); } else { printf("Did Not promote this CSE\n"); } } #endif // DEBUG if (doCSE) { PerformCSE(&candidate); } } } // Perform the necessary cleanup after our CSE heuristics have run // void Cleanup() { // Nothing to do, currently. } }; /***************************************************************************** * * Routine for performing the Value Number based CSE using our heuristics */ void Compiler::optValnumCSE_Heuristic() { #ifdef DEBUG if (verbose) { printf("\n************ Trees at start of optValnumCSE_Heuristic()\n"); fgDumpTrees(fgFirstBB, nullptr); printf("\n"); } #endif // DEBUG CSE_Heuristic cse_heuristic(this); cse_heuristic.Initialize(); cse_heuristic.SortCandidates(); cse_heuristic.ConsiderCandidates(); cse_heuristic.Cleanup(); } /***************************************************************************** * * Perform common sub-expression elimination. */ void Compiler::optOptimizeValnumCSEs() { #ifdef DEBUG if (optConfigDisableCSE()) { return; // Disabled by JitNoCSE } #endif optValnumCSE_phase = true; optCSEweight = -1.0f; optValnumCSE_Init(); if (optValnumCSE_Locate()) { optValnumCSE_InitDataFlow(); optValnumCSE_DataFlow(); optValnumCSE_Availablity(); optValnumCSE_Heuristic(); } optValnumCSE_phase = false; } /***************************************************************************** * * The following determines whether the given expression is a worthy CSE * candidate. */ bool Compiler::optIsCSEcandidate(GenTree* tree) { /* No good if the expression contains side effects or if it was marked as DONT CSE */ if (tree->gtFlags & (GTF_ASG | GTF_DONT_CSE)) { return false; } var_types type = tree->TypeGet(); genTreeOps oper = tree->OperGet(); if (type == TYP_VOID) { return false; } // If this is a struct type (including SIMD*), we can only consider it for CSE-ing // if we can get its handle, so that we can create a temp. if (varTypeIsStruct(type) && (gtGetStructHandleIfPresent(tree) == NO_CLASS_HANDLE)) { return false; } unsigned cost; if (compCodeOpt() == SMALL_CODE) { cost = tree->GetCostSz(); } else { cost = tree->GetCostEx(); } /* Don't bother if the potential savings are very low */ if (cost < MIN_CSE_COST) { return false; } #if !CSE_CONSTS /* Don't bother with constants */ if (tree->OperIsConst()) { return false; } #endif /* Check for some special cases */ switch (oper) { case GT_CALL: GenTreeCall* call; call = tree->AsCall(); // Don't mark calls to allocation helpers as CSE candidates. // Marking them as CSE candidates usually blocks CSEs rather than enables them. // A typical case is: // [1] GT_IND(x) = GT_CALL ALLOC_HELPER // ... // [2] y = GT_IND(x) // ... 
// [3] z = GT_IND(x) // If we mark CALL ALLOC_HELPER as a CSE candidate, we later discover // that it can't be a CSE def because GT_INDs in [2] and [3] can cause // more exceptions (NullRef) so we abandon this CSE. // If we don't mark CALL ALLOC_HELPER as a CSE candidate, we are able // to use GT_IND(x) in [2] as a CSE def. if ((call->gtCallType == CT_HELPER) && s_helperCallProperties.IsAllocator(eeGetHelperNum(call->gtCallMethHnd))) { return false; } // If we have a simple helper call with no other persistent side-effects // then we allow this tree to be a CSE candidate // if (gtTreeHasSideEffects(tree, GTF_PERSISTENT_SIDE_EFFECTS | GTF_IS_IN_CSE) == false) { return true; } else { // Calls generally cannot be CSE-ed return false; } case GT_IND: // TODO-CQ: Review this... /* We try to cse GT_ARR_ELEM nodes instead of GT_IND(GT_ARR_ELEM). Doing the first allows cse to also kick in for code like "GT_IND(GT_ARR_ELEM) = GT_IND(GT_ARR_ELEM) + xyz", whereas doing the second would not allow it */ return (tree->AsOp()->gtOp1->gtOper != GT_ARR_ELEM); case GT_CNS_LNG: #ifndef TARGET_64BIT return false; // Don't CSE 64-bit constants on 32-bit platforms #endif case GT_CNS_INT: case GT_CNS_DBL: case GT_CNS_STR: return true; // We reach here only when CSE_CONSTS is enabled case GT_ARR_ELEM: case GT_ARR_LENGTH: case GT_CLS_VAR: case GT_LCL_FLD: return true; case GT_LCL_VAR: return false; // Can't CSE a volatile LCL_VAR case GT_NEG: case GT_NOT: case GT_BSWAP: case GT_BSWAP16: case GT_CAST: return true; // CSE these Unary Operators case GT_SUB: case GT_DIV: case GT_MOD: case GT_UDIV: case GT_UMOD: case GT_OR: case GT_AND: case GT_XOR: case GT_RSH: case GT_RSZ: case GT_ROL: case GT_ROR: return true; // CSE these Binary Operators case GT_ADD: // Check for ADDRMODE flag on these Binary Operators case GT_MUL: case GT_LSH: if ((tree->gtFlags & GTF_ADDRMODE_NO_CSE) != 0) { return false; } return true; case GT_EQ: case GT_NE: case GT_LT: case GT_LE: case GT_GE: case GT_GT: return true; // Allow the CSE of Comparison operators #ifdef FEATURE_SIMD case GT_SIMD: return true; // allow SIMD intrinsics to be CSE-ed #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS case GT_HWINTRINSIC: { GenTreeHWIntrinsic* hwIntrinsicNode = tree->AsHWIntrinsic(); assert(hwIntrinsicNode != nullptr); HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(hwIntrinsicNode->GetHWIntrinsicId()); switch (category) { #ifdef TARGET_XARCH case HW_Category_SimpleSIMD: case HW_Category_IMM: case HW_Category_Scalar: case HW_Category_SIMDScalar: case HW_Category_Helper: break; #elif defined(TARGET_ARM64) case HW_Category_SIMD: case HW_Category_SIMDByIndexedElement: case HW_Category_ShiftLeftByImmediate: case HW_Category_ShiftRightByImmediate: case HW_Category_Scalar: case HW_Category_Helper: break; #endif case HW_Category_MemoryLoad: case HW_Category_MemoryStore: case HW_Category_Special: default: return false; } if (hwIntrinsicNode->OperIsMemoryStore()) { // NI_BMI2_MultiplyNoFlags, etc... return false; } if (hwIntrinsicNode->OperIsMemoryLoad()) { // NI_AVX2_BroadcastScalarToVector128, NI_AVX2_GatherVector128, etc... return false; } return true; // allow Hardware Intrinsics to be CSE-ed } #endif // FEATURE_HW_INTRINSICS case GT_INTRINSIC: return true; // allow Intrinsics to be CSE-ed case GT_OBJ: return varTypeIsEnregisterable(type); // Allow enregisterable GT_OBJ's to be CSE-ed. (i.e. SIMD types) case GT_COMMA: return true; // Allow GT_COMMA nodes to be CSE-ed. 
case GT_COLON: case GT_QMARK: case GT_NOP: case GT_RETURN: return false; // Currently the only special nodes that we hit // that we know that we don't want to CSE default: break; // Any new nodes that we might add later... } return false; } #ifdef DEBUG // // A Debug only method that allows you to control whether the CSE logic is enabled for this method. // // If this method returns false then the CSE phase should be performed. // If the method returns true then the CSE phase should be skipped. // bool Compiler::optConfigDisableCSE() { // Next check if COMPlus_JitNoCSE is set and applies to this method // unsigned jitNoCSE = JitConfig.JitNoCSE(); if (jitNoCSE > 0) { unsigned methodCount = Compiler::jitTotalMethodCompiled; if ((jitNoCSE & 0xF000000) == 0xF000000) { unsigned methodCountMask = methodCount & 0xFFF; unsigned bitsZero = (jitNoCSE >> 12) & 0xFFF; unsigned bitsOne = (jitNoCSE >> 0) & 0xFFF; if (((methodCountMask & bitsOne) == bitsOne) && ((~methodCountMask & bitsZero) == bitsZero)) { if (verbose) { printf(" Disabled by JitNoCSE methodCountMask\n"); } return true; // The CSE phase for this method is disabled } } else if (jitNoCSE <= (methodCount + 1)) { if (verbose) { printf(" Disabled by JitNoCSE > methodCount\n"); } return true; // The CSE phase for this method is disabled } } return false; } // // A Debug only method that allows you to control whether the CSE logic is enabled for // a particular CSE in a method // // If this method returns false then the CSE should be performed. // If the method returns true then the CSE should be skipped. // bool Compiler::optConfigDisableCSE2() { static unsigned totalCSEcount = 0; unsigned jitNoCSE2 = JitConfig.JitNoCSE2(); totalCSEcount++; if (jitNoCSE2 > 0) { if ((jitNoCSE2 & 0xF000000) == 0xF000000) { unsigned totalCSEMask = totalCSEcount & 0xFFF; unsigned bitsZero = (jitNoCSE2 >> 12) & 0xFFF; unsigned bitsOne = (jitNoCSE2 >> 0) & 0xFFF; if (((totalCSEMask & bitsOne) == bitsOne) && ((~totalCSEMask & bitsZero) == bitsZero)) { if (verbose) { printf(" Disabled by jitNoCSE2 Ones/Zeros mask\n"); } return true; } } else if ((jitNoCSE2 & 0xF000000) == 0xE000000) { unsigned totalCSEMask = totalCSEcount & 0xFFF; unsigned disableMask = jitNoCSE2 & 0xFFF; disableMask >>= (totalCSEMask % 12); if (disableMask & 1) { if (verbose) { printf(" Disabled by jitNoCSE2 rotating disable mask\n"); } return true; } } else if (jitNoCSE2 <= totalCSEcount) { if (verbose) { printf(" Disabled by jitNoCSE2 > totalCSEcount\n"); } return true; } } return false; } #endif void Compiler::optOptimizeCSEs() { if (optCSEstart != BAD_VAR_NUM) { // CSE being run multiple times so we may need to clean up old // information. optCleanupCSEs(); } optCSECandidateCount = 0; optCSEstart = lvaCount; INDEBUG(optEnsureClearCSEInfo()); optOptimizeValnumCSEs(); } /***************************************************************************** * * Cleanup after CSE to allow us to run more than once. */ void Compiler::optCleanupCSEs() { // We must clear the BBF_VISITED and BBF_MARKED flags. for (BasicBlock* const block : Blocks()) { // And clear all the "visited" bits on the block. block->bbFlags &= ~(BBF_VISITED | BBF_MARKED); // Walk the statement trees in this basic block. for (Statement* const stmt : block->NonPhiStatements()) { // We must clear the gtCSEnum field. 
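        // (Added note: the loop below visits every node in the statement by walking the linked node
        //  list backwards from the root via gtPrev; only the gtCSEnum field needs to be reset here.)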
for (GenTree* tree = stmt->GetRootNode(); tree; tree = tree->gtPrev) { tree->gtCSEnum = NO_CSE; } } } } #ifdef DEBUG /***************************************************************************** * * Ensure that all the CSE information in the IR is initialized the way we expect it, * before running a CSE phase. This is basically an assert that optCleanupCSEs() is not needed. */ void Compiler::optEnsureClearCSEInfo() { for (BasicBlock* const block : Blocks()) { assert((block->bbFlags & (BBF_VISITED | BBF_MARKED)) == 0); for (Statement* const stmt : block->NonPhiStatements()) { for (GenTree* tree = stmt->GetRootNode(); tree; tree = tree->gtPrev) { assert(tree->gtCSEnum == NO_CSE); } } } } //------------------------------------------------------------------------ // optPrintCSEDataFlowSet: Print out one of the CSE dataflow sets bbCseGen, bbCseIn, bbCseOut, // interpreting the bits in a more useful way for the dump. // // Arguments: // cseDataFlowSet - One of the dataflow sets to display // includeBits - Display the actual bits of the set as well // void Compiler::optPrintCSEDataFlowSet(EXPSET_VALARG_TP cseDataFlowSet, bool includeBits /* = true */) { if (includeBits) { printf("%s ", genES2str(cseLivenessTraits, cseDataFlowSet)); } bool first = true; for (unsigned cseIndex = 1; cseIndex <= optCSECandidateCount; cseIndex++) { unsigned cseAvailBit = getCSEAvailBit(cseIndex); unsigned cseAvailCrossCallBit = getCSEAvailCrossCallBit(cseIndex); if (BitVecOps::IsMember(cseLivenessTraits, cseDataFlowSet, cseAvailBit)) { if (!first) { printf(", "); } const bool isAvailCrossCall = BitVecOps::IsMember(cseLivenessTraits, cseDataFlowSet, cseAvailCrossCallBit); printf(FMT_CSE "%s", cseIndex, isAvailCrossCall ? ".c" : ""); first = false; } } } #endif // DEBUG
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX OptCSE XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #include "jitstd/algorithm.h" #ifdef _MSC_VER #pragma hdrstop #endif /* static */ const size_t Compiler::s_optCSEhashSizeInitial = EXPSET_SZ * 2; const size_t Compiler::s_optCSEhashGrowthFactor = 2; const size_t Compiler::s_optCSEhashBucketSize = 4; /***************************************************************************** * * We've found all the candidates, build the index for easy access. */ void Compiler::optCSEstop() { if (optCSECandidateCount == 0) { return; } CSEdsc* dsc; CSEdsc** ptr; size_t cnt; optCSEtab = new (this, CMK_CSE) CSEdsc*[optCSECandidateCount](); for (cnt = optCSEhashSize, ptr = optCSEhash; cnt; cnt--, ptr++) { for (dsc = *ptr; dsc; dsc = dsc->csdNextInBucket) { if (dsc->csdIndex) { noway_assert((unsigned)dsc->csdIndex <= optCSECandidateCount); if (optCSEtab[dsc->csdIndex - 1] == nullptr) { optCSEtab[dsc->csdIndex - 1] = dsc; } } } } #ifdef DEBUG for (cnt = 0; cnt < optCSECandidateCount; cnt++) { noway_assert(optCSEtab[cnt] != nullptr); } #endif } /***************************************************************************** * * Return the descriptor for the CSE with the given index. */ inline Compiler::CSEdsc* Compiler::optCSEfindDsc(unsigned index) { noway_assert(index); noway_assert(index <= optCSECandidateCount); noway_assert(optCSEtab[index - 1]); return optCSEtab[index - 1]; } //------------------------------------------------------------------------ // Compiler::optUnmarkCSE // // Arguments: // tree - A sub tree that originally was part of a CSE use // that we are currently in the process of removing. // // Return Value: // Returns true if we can safely remove the 'tree' node. // Returns false if the node is a CSE def that the caller // needs to extract and preserve. // // Notes: // If 'tree' is a CSE use then we perform an unmark CSE operation // so that the CSE used counts and weight are updated properly. // The only caller for this method is optUnmarkCSEs which is a // tree walker visitor function. When we return false this method // returns WALK_SKIP_SUBTREES so that we don't visit the remaining // nodes of the CSE def. // bool Compiler::optUnmarkCSE(GenTree* tree) { if (!IS_CSE_INDEX(tree->gtCSEnum)) { // If this node isn't a CSE use or def we can safely remove this node. // return true; } // make sure it's been initialized noway_assert(optCSEweight >= 0); // Is this a CSE use? if (IS_CSE_USE(tree->gtCSEnum)) { unsigned CSEnum = GET_CSE_INDEX(tree->gtCSEnum); CSEdsc* desc = optCSEfindDsc(CSEnum); #ifdef DEBUG if (verbose) { printf("Unmark CSE use #%02d at ", CSEnum); printTreeID(tree); printf(": %3d -> %3d\n", desc->csdUseCount, desc->csdUseCount - 1); } #endif // DEBUG // Perform an unmark CSE operation // 1. Reduce the nested CSE's 'use' count noway_assert(desc->csdUseCount > 0); if (desc->csdUseCount > 0) { desc->csdUseCount -= 1; if (desc->csdUseWtCnt < optCSEweight) { desc->csdUseWtCnt = 0; } else { desc->csdUseWtCnt -= optCSEweight; } } // 2. 
Unmark the CSE infomation in the node tree->gtCSEnum = NO_CSE; return true; } else { // It is not safe to remove this node, so we will return false // and the caller must add this node to the side effect list // return false; } } Compiler::fgWalkResult Compiler::optCSE_MaskHelper(GenTree** pTree, fgWalkData* walkData) { GenTree* tree = *pTree; Compiler* comp = walkData->compiler; optCSE_MaskData* pUserData = (optCSE_MaskData*)(walkData->pCallbackData); if (IS_CSE_INDEX(tree->gtCSEnum)) { unsigned cseIndex = GET_CSE_INDEX(tree->gtCSEnum); // Note that we DO NOT use getCSEAvailBit() here, for the CSE_defMask/CSE_useMask unsigned cseBit = genCSEnum2bit(cseIndex); if (IS_CSE_DEF(tree->gtCSEnum)) { BitVecOps::AddElemD(comp->cseMaskTraits, pUserData->CSE_defMask, cseBit); } else { BitVecOps::AddElemD(comp->cseMaskTraits, pUserData->CSE_useMask, cseBit); } } return WALK_CONTINUE; } // This functions walks all the node for an given tree // and return the mask of CSE defs and uses for the tree // void Compiler::optCSE_GetMaskData(GenTree* tree, optCSE_MaskData* pMaskData) { pMaskData->CSE_defMask = BitVecOps::MakeEmpty(cseMaskTraits); pMaskData->CSE_useMask = BitVecOps::MakeEmpty(cseMaskTraits); fgWalkTreePre(&tree, optCSE_MaskHelper, (void*)pMaskData); } //------------------------------------------------------------------------ // optCSE_canSwap: Determine if the execution order of two nodes can be swapped. // // Arguments: // op1 - The first node // op2 - The second node // // Return Value: // Return true iff it safe to swap the execution order of 'op1' and 'op2', // considering only the locations of the CSE defs and uses. // // Assumptions: // 'op1' currently occurse before 'op2' in the execution order. // bool Compiler::optCSE_canSwap(GenTree* op1, GenTree* op2) { // op1 and op2 must be non-null. assert(op1 != nullptr); assert(op2 != nullptr); bool canSwap = true; // the default result unless proven otherwise. // If we haven't setup cseMaskTraits, do it now if (cseMaskTraits == nullptr) { cseMaskTraits = new (getAllocator(CMK_CSE)) BitVecTraits(optCSECandidateCount, this); } optCSE_MaskData op1MaskData; optCSE_MaskData op2MaskData; optCSE_GetMaskData(op1, &op1MaskData); optCSE_GetMaskData(op2, &op2MaskData); // We cannot swap if op1 contains a CSE def that is used by op2 if (!BitVecOps::IsEmptyIntersection(cseMaskTraits, op1MaskData.CSE_defMask, op2MaskData.CSE_useMask)) { canSwap = false; } else { // We also cannot swap if op2 contains a CSE def that is used by op1. 
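        // (Added clarification: in either direction the problem is the same, the swap would reorder a
        //  CSE def relative to a use of that same CSE and so invalidate the ordering that the CSE
        //  marking relies on.)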
if (!BitVecOps::IsEmptyIntersection(cseMaskTraits, op2MaskData.CSE_defMask, op1MaskData.CSE_useMask)) { canSwap = false; } } return canSwap; } /***************************************************************************** * * Compare function passed to jitstd::sort() by CSE_Heuristic::SortCandidates * when (CodeOptKind() != Compiler::SMALL_CODE) */ /* static */ bool Compiler::optCSEcostCmpEx::operator()(const CSEdsc* dsc1, const CSEdsc* dsc2) { GenTree* exp1 = dsc1->csdTree; GenTree* exp2 = dsc2->csdTree; auto expCost1 = exp1->GetCostEx(); auto expCost2 = exp2->GetCostEx(); if (expCost2 != expCost1) { return expCost2 < expCost1; } // Sort the higher Use Counts toward the top if (dsc2->csdUseWtCnt != dsc1->csdUseWtCnt) { return dsc2->csdUseWtCnt < dsc1->csdUseWtCnt; } // With the same use count, Sort the lower Def Counts toward the top if (dsc1->csdDefWtCnt != dsc2->csdDefWtCnt) { return dsc1->csdDefWtCnt < dsc2->csdDefWtCnt; } // In order to ensure that we have a stable sort, we break ties using the csdIndex return dsc1->csdIndex < dsc2->csdIndex; } /***************************************************************************** * * Compare function passed to jitstd::sort() by CSE_Heuristic::SortCandidates * when (CodeOptKind() == Compiler::SMALL_CODE) */ /* static */ bool Compiler::optCSEcostCmpSz::operator()(const CSEdsc* dsc1, const CSEdsc* dsc2) { GenTree* exp1 = dsc1->csdTree; GenTree* exp2 = dsc2->csdTree; auto expCost1 = exp1->GetCostSz(); auto expCost2 = exp2->GetCostSz(); if (expCost2 != expCost1) { return expCost2 < expCost1; } // Sort the higher Use Counts toward the top if (dsc2->csdUseCount != dsc1->csdUseCount) { return dsc2->csdUseCount < dsc1->csdUseCount; } // With the same use count, Sort the lower Def Counts toward the top if (dsc1->csdDefCount != dsc2->csdDefCount) { return dsc1->csdDefCount < dsc2->csdDefCount; } // In order to ensure that we have a stable sort, we break ties using the csdIndex return dsc1->csdIndex < dsc2->csdIndex; } /***************************************************************************** * * Initialize the Value Number CSE tracking logic. */ void Compiler::optValnumCSE_Init() { #ifdef DEBUG optCSEtab = nullptr; #endif // This gets set in optValnumCSE_InitDataFlow cseLivenessTraits = nullptr; // Initialize when used by optCSE_canSwap() cseMaskTraits = nullptr; // Allocate and clear the hash bucket table optCSEhash = new (this, CMK_CSE) CSEdsc*[s_optCSEhashSizeInitial](); optCSEhashSize = s_optCSEhashSizeInitial; optCSEhashMaxCountBeforeResize = optCSEhashSize * s_optCSEhashBucketSize; optCSEhashCount = 0; optCSECandidateCount = 0; optDoCSE = false; // Stays false until we find duplicate CSE tree // optCseCheckedBoundMap is unused in most functions, allocated only when used optCseCheckedBoundMap = nullptr; } unsigned optCSEKeyToHashIndex(size_t key, size_t optCSEhashSize) { unsigned hash; hash = (unsigned)key; #ifdef TARGET_64BIT hash ^= (unsigned)(key >> 32); #endif hash *= (unsigned)(optCSEhashSize + 1); hash >>= 7; return hash % optCSEhashSize; } //--------------------------------------------------------------------------- // optValnumCSE_Index: // - Returns the CSE index to use for this tree, // or zero if this expression is not currently a CSE. // // Arguments: // tree - The current candidate CSE expression // stmt - The current statement that contains tree // // // Notes: We build a hash table that contains all of the expressions that // are presented to this method. Whenever we see a duplicate expression // we have a CSE candidate. 
If it is the first time seeing the duplicate // we allocate a new CSE index. If we have already allocated a CSE index // we return that index. There currently is a limit on the number of CSEs // that we can have of MAX_CSE_CNT (64) // unsigned Compiler::optValnumCSE_Index(GenTree* tree, Statement* stmt) { size_t key; unsigned hval; CSEdsc* hashDsc; bool enableSharedConstCSE = false; bool isSharedConst = false; int configValue = JitConfig.JitConstCSE(); #if defined(TARGET_ARM64) // ARM64 - allow to combine with nearby offsets, when config is not 2 or 4 if ((configValue != CONST_CSE_ENABLE_ARM64_NO_SHARING) && (configValue != CONST_CSE_ENABLE_ALL_NO_SHARING)) { enableSharedConstCSE = true; } #endif // TARGET_ARM64 // All Platforms - also allow to combine with nearby offsets, when config is 3 if (configValue == CONST_CSE_ENABLE_ALL) { enableSharedConstCSE = true; } // We use the liberal Value numbers when building the set of CSE ValueNum vnLib = tree->GetVN(VNK_Liberal); ValueNum vnLibNorm = vnStore->VNNormalValue(vnLib); // We use the normal value number because we want the CSE candidate to // represent all expressions that produce the same normal value number. // We will handle the case where we have different exception sets when // promoting the candidates. // // We do this because a GT_IND will usually have a NullPtrExc entry in its // exc set, but we may have cleared the GTF_EXCEPT flag and if so, it won't // have an NullPtrExc, or we may have assigned the value of an GT_IND // into a LCL_VAR and then read it back later. // // When we are promoting the CSE candidates we ensure that any CSE // uses that we promote have an exc set that is the same as the CSE defs // or have an empty set. And that all of the CSE defs produced the required // set of exceptions for the CSE uses. // // We assign either vnLib or vnLibNorm as the hash key // // The only exception to using the normal value is for the GT_COMMA nodes. // Here we check to see if we have a GT_COMMA with a different value number // than the one from its op2. For this case we want to create two different // CSE candidates. This allows us to CSE the GT_COMMA separately from its value. // if (tree->OperGet() == GT_COMMA) { // op2 is the value produced by a GT_COMMA GenTree* op2 = tree->AsOp()->gtOp2; ValueNum vnOp2Lib = op2->GetVN(VNK_Liberal); // If the value number for op2 and tree are different, then some new // exceptions were produced by op1. For that case we will NOT use the // normal value. This allows us to CSE commas with an op1 that is // an BOUNDS_CHECK. 
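    // (Illustrative example, added: for COMMA(BOUNDS_CHECK(i, len), a[i]) the comma's liberal VN
    //  carries the range-check exception while its op2's normal value does not; keying the comma on
    //  the exception-bearing VN lets the comma and its op2 be tracked as separate CSE candidates.)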
// if (vnOp2Lib != vnLib) { key = vnLib; // include the exc set in the hash key } else { key = vnLibNorm; } // If we didn't do the above we would have op1 as the CSE def // and the parent comma as the CSE use (but with a different exc set) // This would prevent us from making any CSE with the comma // assert(vnLibNorm == vnStore->VNNormalValue(vnOp2Lib)); } else if (enableSharedConstCSE && tree->IsIntegralConst()) { assert(vnStore->IsVNConstant(vnLibNorm)); // We don't share small offset constants when they require a reloc // if (!tree->AsIntConCommon()->ImmedValNeedsReloc(this)) { // Here we make constants that have the same upper bits use the same key // // We create a key that encodes just the upper bits of the constant by // shifting out some of the low bits, (12 or 16 bits) // // This is the only case where the hash key is not a ValueNumber // size_t constVal = vnStore->CoercedConstantValue<size_t>(vnLibNorm); key = Encode_Shared_Const_CSE_Value(constVal); isSharedConst = true; } else { // Use the vnLibNorm value as the key key = vnLibNorm; } } else // Not a GT_COMMA or a GT_CNS_INT { key = vnLibNorm; } // Make sure that the result of Is_Shared_Const_CSE(key) matches isSharedConst. // Note that when isSharedConst is true then we require that the TARGET_SIGN_BIT is set in the key // and otherwise we require that we never create a ValueNumber with the TARGET_SIGN_BIT set. // assert(isSharedConst == Is_Shared_Const_CSE(key)); // Compute the hash value for the expression hval = optCSEKeyToHashIndex(key, optCSEhashSize); /* Look for a matching index in the hash table */ bool newCSE = false; for (hashDsc = optCSEhash[hval]; hashDsc; hashDsc = hashDsc->csdNextInBucket) { if (hashDsc->csdHashKey == key) { // Check for mismatched types on GT_CNS_INT nodes if ((tree->OperGet() == GT_CNS_INT) && (tree->TypeGet() != hashDsc->csdTree->TypeGet())) { continue; } treeStmtLst* newElem; /* Have we started the list of matching nodes? */ if (hashDsc->csdTreeList == nullptr) { // Create the new element based upon the matching hashDsc element. 
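                // (Added note: the very first occurrence of this expression was recorded directly in the
                //  CSEdsc fields; it is copied into the list here so that csdTreeList ends up holding
                //  every occurrence, including that first one.)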
newElem = new (this, CMK_TreeStatementList) treeStmtLst; newElem->tslTree = hashDsc->csdTree; newElem->tslStmt = hashDsc->csdStmt; newElem->tslBlock = hashDsc->csdBlock; newElem->tslNext = nullptr; /* Start the list with the first CSE candidate recorded */ hashDsc->csdTreeList = newElem; hashDsc->csdTreeLast = newElem; hashDsc->csdStructHnd = NO_CLASS_HANDLE; hashDsc->csdIsSharedConst = isSharedConst; hashDsc->csdStructHndMismatch = false; if (varTypeIsStruct(tree->gtType)) { // When we have a GT_IND node with a SIMD type then we don't have a reliable // struct handle and gtGetStructHandleIfPresent returns a guess that can be wrong // if ((hashDsc->csdTree->OperGet() != GT_IND) || !varTypeIsSIMD(tree)) { hashDsc->csdStructHnd = gtGetStructHandleIfPresent(hashDsc->csdTree); } } } noway_assert(hashDsc->csdTreeList); /* Append this expression to the end of the list */ newElem = new (this, CMK_TreeStatementList) treeStmtLst; newElem->tslTree = tree; newElem->tslStmt = stmt; newElem->tslBlock = compCurBB; newElem->tslNext = nullptr; hashDsc->csdTreeLast->tslNext = newElem; hashDsc->csdTreeLast = newElem; if (varTypeIsStruct(newElem->tslTree->gtType)) { // When we have a GT_IND node with a SIMD type then we don't have a reliable // struct handle and gtGetStructHandleIfPresent returns a guess that can be wrong // if ((newElem->tslTree->OperGet() != GT_IND) || !varTypeIsSIMD(newElem->tslTree)) { CORINFO_CLASS_HANDLE newElemStructHnd = gtGetStructHandleIfPresent(newElem->tslTree); if (newElemStructHnd != NO_CLASS_HANDLE) { if (hashDsc->csdStructHnd == NO_CLASS_HANDLE) { // The previous node(s) were GT_IND's and didn't carry the struct handle info // The current node does have the struct handle info, so record it now // hashDsc->csdStructHnd = newElemStructHnd; } else if (newElemStructHnd != hashDsc->csdStructHnd) { hashDsc->csdStructHndMismatch = true; #ifdef DEBUG if (verbose) { printf("Abandoned - CSE candidate has mismatching struct handles!\n"); printTreeID(newElem->tslTree); } #endif // DEBUG } } } } optDoCSE = true; // Found a duplicate CSE tree /* Have we assigned a CSE index? 
*/ if (hashDsc->csdIndex == 0) { newCSE = true; break; } assert(FitsIn<signed char>(hashDsc->csdIndex)); tree->gtCSEnum = ((signed char)hashDsc->csdIndex); return hashDsc->csdIndex; } } if (!newCSE) { /* Not found, create a new entry (unless we have too many already) */ if (optCSECandidateCount < MAX_CSE_CNT) { if (optCSEhashCount == optCSEhashMaxCountBeforeResize) { size_t newOptCSEhashSize = optCSEhashSize * s_optCSEhashGrowthFactor; CSEdsc** newOptCSEhash = new (this, CMK_CSE) CSEdsc*[newOptCSEhashSize](); // Iterate through each existing entry, moving to the new table CSEdsc** ptr; CSEdsc* dsc; size_t cnt; for (cnt = optCSEhashSize, ptr = optCSEhash; cnt; cnt--, ptr++) { for (dsc = *ptr; dsc;) { CSEdsc* nextDsc = dsc->csdNextInBucket; size_t newHval = optCSEKeyToHashIndex(dsc->csdHashKey, newOptCSEhashSize); // Move CSEdsc to bucket in enlarged table dsc->csdNextInBucket = newOptCSEhash[newHval]; newOptCSEhash[newHval] = dsc; dsc = nextDsc; } } hval = optCSEKeyToHashIndex(key, newOptCSEhashSize); optCSEhash = newOptCSEhash; optCSEhashSize = newOptCSEhashSize; optCSEhashMaxCountBeforeResize = optCSEhashMaxCountBeforeResize * s_optCSEhashGrowthFactor; } ++optCSEhashCount; hashDsc = new (this, CMK_CSE) CSEdsc; hashDsc->csdHashKey = key; hashDsc->csdConstDefValue = 0; hashDsc->csdConstDefVN = vnStore->VNForNull(); // uninit value hashDsc->csdIndex = 0; hashDsc->csdIsSharedConst = false; hashDsc->csdLiveAcrossCall = false; hashDsc->csdDefCount = 0; hashDsc->csdUseCount = 0; hashDsc->csdDefWtCnt = 0; hashDsc->csdUseWtCnt = 0; hashDsc->defExcSetPromise = vnStore->VNForEmptyExcSet(); hashDsc->defExcSetCurrent = vnStore->VNForNull(); // uninit value hashDsc->defConservNormVN = vnStore->VNForNull(); // uninit value hashDsc->csdTree = tree; hashDsc->csdStmt = stmt; hashDsc->csdBlock = compCurBB; hashDsc->csdTreeList = nullptr; /* Append the entry to the hash bucket */ hashDsc->csdNextInBucket = optCSEhash[hval]; optCSEhash[hval] = hashDsc; } return 0; } else // newCSE is true { /* We get here only after finding a matching CSE */ /* Create a new CSE (unless we have the maximum already) */ if (optCSECandidateCount == MAX_CSE_CNT) { #ifdef DEBUG if (verbose) { printf("Exceeded the MAX_CSE_CNT, not using tree:\n"); gtDispTree(tree); } #endif // DEBUG return 0; } C_ASSERT((signed char)MAX_CSE_CNT == MAX_CSE_CNT); unsigned CSEindex = ++optCSECandidateCount; /* Record the new CSE index in the hashDsc */ hashDsc->csdIndex = CSEindex; /* Update the gtCSEnum field in the original tree */ noway_assert(hashDsc->csdTreeList->tslTree->gtCSEnum == 0); assert(FitsIn<signed char>(CSEindex)); hashDsc->csdTreeList->tslTree->gtCSEnum = ((signed char)CSEindex); noway_assert(((unsigned)hashDsc->csdTreeList->tslTree->gtCSEnum) == CSEindex); tree->gtCSEnum = ((signed char)CSEindex); #ifdef DEBUG if (verbose) { printf("\nCSE candidate #%02u, key=", CSEindex); if (!Compiler::Is_Shared_Const_CSE(key)) { vnPrint((unsigned)key, 0); } else { size_t kVal = Compiler::Decode_Shared_Const_CSE_Value(key); printf("K_%p", dspPtr(kVal)); } printf(" in " FMT_BB ", [cost=%2u, size=%2u]: \n", compCurBB->bbNum, tree->GetCostEx(), tree->GetCostSz()); gtDispTree(tree); } #endif // DEBUG return CSEindex; } } //------------------------------------------------------------------------ // optValnumCSE_Locate: Locate CSE candidates and assign them indices. 
// // Returns: // true if there are any CSE candidates, false otherwise // bool Compiler::optValnumCSE_Locate() { bool enableConstCSE = true; int configValue = JitConfig.JitConstCSE(); // all platforms - disable CSE of constant values when config is 1 if (configValue == CONST_CSE_DISABLE_ALL) { enableConstCSE = false; } #if !defined(TARGET_ARM64) // non-ARM64 platforms - disable by default // enableConstCSE = false; // Check for the two enable cases for all platforms // if ((configValue == CONST_CSE_ENABLE_ALL) || (configValue == CONST_CSE_ENABLE_ALL_NO_SHARING)) { enableConstCSE = true; } #endif for (BasicBlock* const block : Blocks()) { /* Make the block publicly available */ compCurBB = block; /* Ensure that the BBF_VISITED and BBF_MARKED flag are clear */ /* Everyone who uses these flags are required to clear afterwards */ noway_assert((block->bbFlags & (BBF_VISITED | BBF_MARKED)) == 0); /* Walk the statement trees in this basic block */ for (Statement* const stmt : block->NonPhiStatements()) { const bool isReturn = stmt->GetRootNode()->OperIs(GT_RETURN); /* We walk the tree in the forwards direction (bottom up) */ bool stmtHasArrLenCandidate = false; for (GenTree* const tree : stmt->TreeList()) { if (tree->OperIsCompare() && stmtHasArrLenCandidate) { // Check if this compare is a function of (one of) the checked // bound candidate(s); we may want to update its value number. // if the array length gets CSEd optCseUpdateCheckedBoundMap(tree); } // Don't allow CSE of constants if it is disabled // if (tree->IsIntegralConst()) { if (!enableConstCSE) { continue; } } // Don't allow non-SIMD struct CSEs under a return; we don't fully // re-morph these if we introduce a CSE assignment, and so may create // IR that lower is not yet prepared to handle. // if (isReturn && varTypeIsStruct(tree->gtType) && !varTypeIsSIMD(tree->gtType)) { continue; } if (!optIsCSEcandidate(tree)) { continue; } ValueNum valueVN = vnStore->VNNormalValue(tree->GetVN(VNK_Liberal)); if (ValueNumStore::isReservedVN(valueVN) && (valueVN != ValueNumStore::VNForNull())) { continue; } // We want to CSE simple constant leaf nodes, but we don't want to // CSE non-leaf trees that compute CSE constant values. // Instead we let the Value Number based Assertion Prop phase handle them. // // Here, unlike the rest of optCSE, we use the conservative value number // rather than the liberal one, since the conservative one // is what the Value Number based Assertion Prop will use // and the point is to avoid optimizing cases that it will // handle. // if (!tree->OperIsLeaf() && vnStore->IsVNConstant(vnStore->VNConservativeNormalValue(tree->gtVNPair))) { continue; } /* Assign an index to this expression */ unsigned CSEindex = optValnumCSE_Index(tree, stmt); if (CSEindex != 0) { noway_assert(((unsigned)tree->gtCSEnum) == CSEindex); } if (IS_CSE_INDEX(CSEindex) && (tree->OperGet() == GT_ARR_LENGTH)) { stmtHasArrLenCandidate = true; } } } } /* We're done if there were no interesting expressions */ if (!optDoCSE) { return false; } /* We're finished building the expression lookup table */ optCSEstop(); return true; } //------------------------------------------------------------------------ // optCseUpdateCheckedBoundMap: Check if this compare is a tractable function of // a checked bound that is a CSE candidate, and insert // an entry in the optCseCheckedBoundMap if so. This facilitates // subsequently updating the compare's value number if // the bound gets CSEd. 
// // Arguments: // compare - The compare node to check // void Compiler::optCseUpdateCheckedBoundMap(GenTree* compare) { assert(compare->OperIsCompare()); ValueNum compareVN = compare->gtVNPair.GetConservative(); VNFuncApp cmpVNFuncApp; if (!vnStore->GetVNFunc(compareVN, &cmpVNFuncApp) || (cmpVNFuncApp.m_func != GetVNFuncForNode(compare))) { // Value numbering inferred this compare as something other // than its own operator; leave its value number alone. return; } // Now look for a checked bound feeding the compare ValueNumStore::CompareCheckedBoundArithInfo info; GenTree* boundParent = nullptr; if (vnStore->IsVNCompareCheckedBound(compareVN)) { // Simple compare of an bound against something else. vnStore->GetCompareCheckedBound(compareVN, &info); boundParent = compare; } else if (vnStore->IsVNCompareCheckedBoundArith(compareVN)) { // Compare of a bound +/- some offset to something else. GenTree* op1 = compare->gtGetOp1(); GenTree* op2 = compare->gtGetOp2(); vnStore->GetCompareCheckedBoundArithInfo(compareVN, &info); if (GetVNFuncForNode(op1) == (VNFunc)info.arrOper) { // The arithmetic node is the bound's parent. boundParent = op1; } else if (GetVNFuncForNode(op2) == (VNFunc)info.arrOper) { // The arithmetic node is the bound's parent. boundParent = op2; } } if (boundParent != nullptr) { GenTree* bound = nullptr; // Find which child of boundParent is the bound. Abort if neither // conservative value number matches the one from the compare VN. GenTree* child1 = boundParent->gtGetOp1(); if ((info.vnBound == child1->gtVNPair.GetConservative()) && IS_CSE_INDEX(child1->gtCSEnum)) { bound = child1; } else { GenTree* child2 = boundParent->gtGetOp2(); if ((info.vnBound == child2->gtVNPair.GetConservative()) && IS_CSE_INDEX(child2->gtCSEnum)) { bound = child2; } } if (bound != nullptr) { // Found a checked bound feeding a compare that is a tractable function of it; // record this in the map so we can update the compare VN if the bound // node gets CSEd. if (optCseCheckedBoundMap == nullptr) { // Allocate map on first use. optCseCheckedBoundMap = new (getAllocator(CMK_CSE)) NodeToNodeMap(getAllocator()); } optCseCheckedBoundMap->Set(bound, compare); } } } /***************************************************************************** * * Compute each blocks bbCseGen * This is the bitset that represents the CSEs that are generated within the block * Also initialize bbCseIn, bbCseOut and bbCseGen sets for all blocks */ void Compiler::optValnumCSE_InitDataFlow() { // BitVec trait information for computing CSE availability using the CSE_DataFlow algorithm. // Two bits are allocated per CSE candidate to compute CSE availability // plus an extra bit to handle the initial unvisited case. // (See CSE_DataFlow::EndMerge for an explaination of why this is necessary) // // The two bits per CSE candidate have the following meanings: // 11 - The CSE is available, and is also available when considering calls as killing availability. // 10 - The CSE is available, but is not available when considering calls as killing availability. // 00 - The CSE is not available // 01 - An illegal combination // const unsigned bitCount = (optCSECandidateCount * 2) + 1; // Init traits and cseCallKillsMask bitvectors. 
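    // Illustration (not from a real dump): with three CSE candidates, cseCallKillsMask ends up
    // holding only the three cseAvailBit positions and none of the cseAvailCrossCallBit positions,
    // so intersecting a block's available set with it at a call site clears every candidate's
    // cross-call availability while leaving its ordinary availability untouched.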
cseLivenessTraits = new (getAllocator(CMK_CSE)) BitVecTraits(bitCount, this); cseCallKillsMask = BitVecOps::MakeEmpty(cseLivenessTraits); for (unsigned inx = 1; inx <= optCSECandidateCount; inx++) { unsigned cseAvailBit = getCSEAvailBit(inx); // a one preserves availability and a zero kills the availability // we generate this kind of bit pattern: 101010101010 // BitVecOps::AddElemD(cseLivenessTraits, cseCallKillsMask, cseAvailBit); } for (BasicBlock* const block : Blocks()) { /* Initialize the blocks's bbCseIn set */ bool init_to_zero = false; if (block == fgFirstBB) { /* Clear bbCseIn for the entry block */ init_to_zero = true; } #if !CSE_INTO_HANDLERS else { if (bbIsHandlerBeg(block)) { /* Clear everything on entry to filters or handlers */ init_to_zero = true; } } #endif if (init_to_zero) { /* Initialize to {ZERO} prior to dataflow */ block->bbCseIn = BitVecOps::MakeEmpty(cseLivenessTraits); } else { /* Initialize to {ALL} prior to dataflow */ block->bbCseIn = BitVecOps::MakeFull(cseLivenessTraits); } block->bbCseOut = BitVecOps::MakeFull(cseLivenessTraits); /* Initialize to {ZERO} prior to locating the CSE candidates */ block->bbCseGen = BitVecOps::MakeEmpty(cseLivenessTraits); } // We walk the set of CSE candidates and set the bit corresponding to the CSEindex // in the block's bbCseGen bitset // for (unsigned inx = 0; inx < optCSECandidateCount; inx++) { CSEdsc* dsc = optCSEtab[inx]; unsigned CSEindex = dsc->csdIndex; treeStmtLst* lst = dsc->csdTreeList; noway_assert(lst); while (lst != nullptr) { BasicBlock* block = lst->tslBlock; unsigned cseAvailBit = getCSEAvailBit(CSEindex); unsigned cseAvailCrossCallBit = getCSEAvailCrossCallBit(CSEindex); // This CSE is generated in 'block', we always set the cseAvailBit // If this block does not contain a call, we also set cseAvailCrossCallBit // // If we have a call in this block then in the loop below we walk the trees // backwards to find any CSEs that are generated after the last call in the block. // BitVecOps::AddElemD(cseLivenessTraits, block->bbCseGen, cseAvailBit); if ((block->bbFlags & BBF_HAS_CALL) == 0) { BitVecOps::AddElemD(cseLivenessTraits, block->bbCseGen, cseAvailCrossCallBit); } lst = lst->tslNext; } } for (BasicBlock* const block : Blocks()) { // If the block doesn't contains a call then skip it... // if ((block->bbFlags & BBF_HAS_CALL) == 0) { continue; } // We only need to examine blocks that generate CSEs // if (BitVecOps::IsEmpty(cseLivenessTraits, block->bbCseGen)) { continue; } // If the block contains a call and generates CSEs, we may need to update // the bbCseGen set as we may generate some CSEs after the last call in the block. 
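        // (For example, in a block shaped like "x = f(); t1 = a*b; t2 = a*b;" the CSE for a*b is
        //  generated after the call to f, so its cseAvailCrossCallBit can be set as well.)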
// // We walk the statements in this basic block starting at the end and walking backwards, // until we reach the first call // Statement* stmt = block->lastStmt(); bool foundCall = false; while (!foundCall) { // Also walk the tree in the backwards direction (bottom up) // looking for CSE's and updating block->bbCseGen // When we reach a call node, we can exit the for loop // for (GenTree* tree = stmt->GetRootNode(); tree != nullptr; tree = tree->gtPrev) { if (IS_CSE_INDEX(tree->gtCSEnum)) { unsigned CSEnum = GET_CSE_INDEX(tree->gtCSEnum); unsigned cseAvailCrossCallBit = getCSEAvailCrossCallBit(CSEnum); BitVecOps::AddElemD(cseLivenessTraits, block->bbCseGen, cseAvailCrossCallBit); } if (tree->OperGet() == GT_CALL) { // Any cse's that we haven't placed in the block->bbCseGen set // aren't currently alive (using cseAvailCrossCallBit) // foundCall = true; break; } } // The JIT can sometimes remove the only call in the block if (stmt == block->firstStmt()) { break; } stmt = stmt->GetPrevStmt(); } } #ifdef DEBUG // Dump out the bbCseGen information that we just created // if (verbose) { bool headerPrinted = false; for (BasicBlock* const block : Blocks()) { if (!BitVecOps::IsEmpty(cseLivenessTraits, block->bbCseGen)) { if (!headerPrinted) { printf("\nBlocks that generate CSE def/uses\n"); headerPrinted = true; } printf(FMT_BB " cseGen = ", block->bbNum); optPrintCSEDataFlowSet(block->bbCseGen); printf("\n"); } } } fgDebugCheckLinks(); #endif // DEBUG } /***************************************************************************** * * CSE Dataflow, so that all helper methods for dataflow are in a single place * */ class CSE_DataFlow { Compiler* m_comp; EXPSET_TP m_preMergeOut; public: CSE_DataFlow(Compiler* pCompiler) : m_comp(pCompiler), m_preMergeOut(BitVecOps::UninitVal()) { } // At the start of the merge function of the dataflow equations, initialize premerge state (to detect changes.) void StartMerge(BasicBlock* block) { // Record the initial value of block->bbCseOut in m_preMergeOut. // It is used in EndMerge() to control the termination of the DataFlow algorithm. // Note that the first time we visit a block, the value of bbCseOut is MakeFull() // BitVecOps::Assign(m_comp->cseLivenessTraits, m_preMergeOut, block->bbCseOut); #if 0 #ifdef DEBUG if (m_comp->verbose) { printf("StartMerge " FMT_BB "\n", block->bbNum); printf(" :: cseOut = %s\n", genES2str(m_comp->cseLivenessTraits, block->bbCseOut)); } #endif // DEBUG #endif // 0 } // Merge: perform the merging of each of the predecessor's liveness values (since this is a forward analysis) void Merge(BasicBlock* block, BasicBlock* predBlock, unsigned dupCount) { #if 0 #ifdef DEBUG if (m_comp->verbose) { printf("Merge " FMT_BB " and " FMT_BB "\n", block->bbNum, predBlock->bbNum); printf(" :: cseIn = %s\n", genES2str(m_comp->cseLivenessTraits, block->bbCseIn)); printf(" :: cseOut = %s\n", genES2str(m_comp->cseLivenessTraits, block->bbCseOut)); } #endif // DEBUG #endif // 0 BitVecOps::IntersectionD(m_comp->cseLivenessTraits, block->bbCseIn, predBlock->bbCseOut); #if 0 #ifdef DEBUG if (m_comp->verbose) { printf(" => cseIn = %s\n", genES2str(m_comp->cseLivenessTraits, block->bbCseIn)); } #endif // DEBUG #endif // 0 } //------------------------------------------------------------------------ // MergeHandler: Merge CSE values into the first exception handler/filter block. 
    //
    // Arguments:
    //    block         - the block that is the start of a handler or filter;
    //    firstTryBlock - the first block of the try for "block" handler;
    //    lastTryBlock  - the last block of the try for "block" handler;
    //
    // Notes:
    //    We can jump to the handler from any instruction in the try region.
    //    It means we can propagate only CSEs that are valid for the whole try region.
    void MergeHandler(BasicBlock* block, BasicBlock* firstTryBlock, BasicBlock* lastTryBlock)
    {
        // TODO CQ: add CSE for handler blocks, CSE_INTO_HANDLERS should be defined.
    }

    // At the end of the merge, store the results of the dataflow equations in a postmerge state.
    // We also handle the case where calls conditionally kill CSE availability.
    //
    bool EndMerge(BasicBlock* block)
    {
        // We can skip the calls kill step when our block doesn't have a callsite
        // or we don't have any available CSEs in our bbCseIn
        //
        if (((block->bbFlags & BBF_HAS_CALL) == 0) || BitVecOps::IsEmpty(m_comp->cseLivenessTraits, block->bbCseIn))
        {
            // No callsite in 'block' or 'block->bbCseIn was empty, so we can use bbCseIn directly
            //
            BitVecOps::DataFlowD(m_comp->cseLivenessTraits, block->bbCseOut, block->bbCseGen, block->bbCseIn);
        }
        else
        {
            // We will create a temporary BitVec to pass to DataFlowD()
            //
            EXPSET_TP cseIn_withCallsKill = BitVecOps::UninitVal();

            // cseIn_withCallsKill is set to (bbCseIn AND cseCallKillsMask)
            //
            BitVecOps::Assign(m_comp->cseLivenessTraits, cseIn_withCallsKill, block->bbCseIn);
            BitVecOps::IntersectionD(m_comp->cseLivenessTraits, cseIn_withCallsKill, m_comp->cseCallKillsMask);

            // Call DataFlowD with the modified BitVec: (bbCseIn AND cseCallKillsMask)
            //
            BitVecOps::DataFlowD(m_comp->cseLivenessTraits, block->bbCseOut, block->bbCseGen, cseIn_withCallsKill);
        }

        // The bool 'notDone' is our terminating condition.
        // If it is 'true' then the initial value of m_preMergeOut was different than the final value that
        // we computed for bbCseOut. When it is true we will visit every successor of 'block'
        //
        // This is also why we need to allocate an extra bit in our cseLivenessTraits BitVecs.
        // We always need to visit our successor blocks once, thus we require that the first time
        // that we visit a block we have a bit set in m_preMergeOut that won't be set when we compute
        // the new value of bbCseOut.
        //
        bool notDone = !BitVecOps::Equal(m_comp->cseLivenessTraits, block->bbCseOut, m_preMergeOut);

#if 0
#ifdef DEBUG
        if (m_comp->verbose)
        {
            printf("EndMerge " FMT_BB "\n", block->bbNum);
            printf(" :: cseIn = %s\n", genES2str(m_comp->cseLivenessTraits, block->bbCseIn));
            if (((block->bbFlags & BBF_HAS_CALL) != 0) &&
                !BitVecOps::IsEmpty(m_comp->cseLivenessTraits, block->bbCseIn))
            {
                printf(" -- cseKill = %s\n", genES2str(m_comp->cseLivenessTraits, m_comp->cseCallKillsMask));
            }
            printf(" :: cseGen = %s\n", genES2str(m_comp->cseLivenessTraits, block->bbCseGen));
            printf(" => cseOut = %s\n", genES2str(m_comp->cseLivenessTraits, block->bbCseOut));
            printf(" != preMerge = %s, => %s\n", genES2str(m_comp->cseLivenessTraits, m_preMergeOut),
                   notDone ? "true" : "false");
        }
#endif // DEBUG
#endif // 0

        return notDone;
    }
};

/*****************************************************************************
 *
 *  Perform a DataFlow forward analysis using the block CSE bitsets:
 *    Inputs:
 *      bbCseGen  - Exact CSEs that are always generated within the block
 *      bbCseIn   - Maximal estimate of CSEs that are/could be available at input to the block
 *      bbCseOut  - Maximal estimate of CSEs that are/could be available at exit to the block
 *
 *    Outputs:
 *      bbCseIn   - Computed CSEs that are available at input to the block
 *      bbCseOut  - Computed CSEs that are available at exit to the block
 */

void Compiler::optValnumCSE_DataFlow()
{
#ifdef DEBUG
    if (verbose)
    {
        printf("\nPerforming DataFlow for ValnumCSE's\n");
    }
#endif // DEBUG

    CSE_DataFlow cse(this);

    // Modified dataflow algorithm for available expressions.
    DataFlow cse_flow(this);

    cse_flow.ForwardAnalysis(cse);

#ifdef DEBUG
    if (verbose)
    {
        printf("\nAfter performing DataFlow for ValnumCSE's\n");

        for (BasicBlock* const block : Blocks())
        {
            printf(FMT_BB " in gen out\n", block->bbNum);
            optPrintCSEDataFlowSet(block->bbCseIn);
            printf("\n");
            optPrintCSEDataFlowSet(block->bbCseGen);
            printf("\n");
            optPrintCSEDataFlowSet(block->bbCseOut);
            printf("\n");
        }

        printf("\n");
    }
#endif // DEBUG
}

//---------------------------------------------------------------------------
// optValnumCSE_Availablity:
//
//  Using the information computed by CSE_DataFlow determine for each
//  CSE whether the CSE is a definition (if the CSE was not available)
//  or if the CSE is a use (if the CSE was previously made available).
//  The implementation iterates over all blocks setting 'available_cses'
//  to the CSEs that are available at input to the block.
//  When a CSE expression is encountered it is classified either
//  as a definition (if the CSE is not in the 'available_cses' set) or
//  as a use (if the CSE is in the 'available_cses' set).  If the CSE
//  is a definition then it is added to the 'available_cses' set.
//
//  This algorithm uncovers the defs and uses gradually and as it does
//  so it also builds the exception set that all defs make: 'defExcSetCurrent'
//  and the exception set that the uses we have seen depend upon: 'defExcSetPromise'.
//
//  Typically expressions with the same normal ValueNum generate exactly the
//  same exception sets. There are two ways that we can get different exception
//  sets with the same Normal value number.
//
//  1. We used an arithmetic identity:
//     e.g. (p.a + q.b) * 0   :: The normal value for the expression is zero
//                               and we have NullPtrExc(p) and NullPtrExc(q)
//     e.g. (p.a - p.a)       :: The normal value for the expression is zero
//                               and we have NullPtrExc(p)
//  2. We stored an expression into a LclVar or into Memory and read it later
//     e.g. t = p.a;
//          e1 = (t + q.b)    :: e1 has one NullPtrExc and e2 has two.
//          e2 = (p.a + q.b)  but both compute the same normal value
//     e.g. m.a = p.a;
//          e1 = (m.a + q.b)  :: e1 and e2 have different exception sets.
// e2 = (p.a + q.b) but both compute the same normal value // void Compiler::optValnumCSE_Availablity() { #ifdef DEBUG if (verbose) { printf("Labeling the CSEs with Use/Def information\n"); } #endif EXPSET_TP available_cses = BitVecOps::MakeEmpty(cseLivenessTraits); for (BasicBlock* const block : Blocks()) { // Make the block publicly available compCurBB = block; // Retrieve the available CSE's at the start of this block BitVecOps::Assign(cseLivenessTraits, available_cses, block->bbCseIn); // Walk the statement trees in this basic block for (Statement* const stmt : block->NonPhiStatements()) { // We walk the tree in the forwards direction (bottom up) for (GenTree* const tree : stmt->TreeList()) { bool isUse = false; bool isDef = false; if (IS_CSE_INDEX(tree->gtCSEnum)) { unsigned CSEnum = GET_CSE_INDEX(tree->gtCSEnum); unsigned cseAvailBit = getCSEAvailBit(CSEnum); unsigned cseAvailCrossCallBit = getCSEAvailCrossCallBit(CSEnum); CSEdsc* desc = optCSEfindDsc(CSEnum); weight_t stmw = block->getBBWeight(this); isUse = BitVecOps::IsMember(cseLivenessTraits, available_cses, cseAvailBit); isDef = !isUse; // If is isn't a CSE use, it is a CSE def // Is this a "use", that we haven't yet marked as live across a call // and it is not available when we have calls that kill CSE's (cseAvailCrossCallBit) // if the above is true then we will mark this the CSE as live across a call // bool madeLiveAcrossCall = false; if (isUse && !desc->csdLiveAcrossCall && !BitVecOps::IsMember(cseLivenessTraits, available_cses, cseAvailCrossCallBit)) { desc->csdLiveAcrossCall = true; madeLiveAcrossCall = true; } #ifdef DEBUG // If this is a CSE def (i.e. the CSE is not available here, since it is being defined), then the // call-kill bit // should also be zero since it is also not available across a call. // if (isDef) { assert(!BitVecOps::IsMember(cseLivenessTraits, available_cses, cseAvailCrossCallBit)); } if (verbose) { printf(FMT_BB " ", block->bbNum); printTreeID(tree); printf(" %s of " FMT_CSE " [weight=%s]%s\n", isUse ? "Use" : "Def", CSEnum, refCntWtd2str(stmw), madeLiveAcrossCall ? " *** Now Live Across Call ***" : ""); } #endif // DEBUG // Have we decided to abandon work on this CSE? if (desc->defExcSetPromise == ValueNumStore::NoVN) { // This candidate had defs with differing liberal exc set VNs // We have abandoned CSE promotion for this candidate // Clear the CSE flag tree->gtCSEnum = NO_CSE; JITDUMP(" Abandoned - CSE candidate has defs with different exception sets!\n"); continue; } // Record the exception set for tree's liberal value number // ValueNum theLiberalExcSet = vnStore->VNExceptionSet(tree->gtVNPair.GetLiberal()); // Is this a CSE use or a def? if (isDef) { // This is a CSE def // Is defExcSetCurrent still set to the uninit marker value of VNForNull() ? if (desc->defExcSetCurrent == vnStore->VNForNull()) { // This is the first time visited, so record this defs exception set desc->defExcSetCurrent = theLiberalExcSet; } // Have we seen a CSE use and made a promise of an exception set? 
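                        // (If so, this def's liberal exception set must still cover that promise;
                        //  otherwise the candidate is abandoned just below.)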
// if (desc->defExcSetPromise != vnStore->VNForEmptyExcSet()) { // The exeception set held in desc->defExcSetPromise must be a subset of theLiberalExcSet // if (vnStore->VNExcIsSubset(theLiberalExcSet, desc->defExcSetPromise)) { // This new def still satisfies any promise made to all the CSE uses that we have // encountered // // no update is needed when these are the same VN if (desc->defExcSetCurrent != theLiberalExcSet) { // We will change the value of desc->defExcSetCurrent to be the intersection of // these two sets. // This is the set of exceptions that all CSE defs have (that we have visited so // far) // ValueNum intersectionExcSet = vnStore->VNExcSetIntersection(desc->defExcSetCurrent, theLiberalExcSet); #ifdef DEBUG if (this->verbose) { VNFuncApp excSeq; vnStore->GetVNFunc(desc->defExcSetCurrent, &excSeq); printf(">>> defExcSetCurrent is "); vnStore->vnDumpExcSeq(this, &excSeq, true); printf("\n"); vnStore->GetVNFunc(theLiberalExcSet, &excSeq); printf(">>> theLiberalExcSet is "); vnStore->vnDumpExcSeq(this, &excSeq, true); printf("\n"); if (intersectionExcSet == vnStore->VNForEmptyExcSet()) { printf(">>> the intersectionExcSet is the EmptyExcSet\n"); } else { vnStore->GetVNFunc(intersectionExcSet, &excSeq); printf(">>> the intersectionExcSet is "); vnStore->vnDumpExcSeq(this, &excSeq, true); printf("\n"); } } #endif // DEBUG // Change the defExcSetCurrent to be a subset of its prior value // assert(vnStore->VNExcIsSubset(desc->defExcSetCurrent, intersectionExcSet)); desc->defExcSetCurrent = intersectionExcSet; } } else // This CSE def doesn't satisfy one of the exceptions already promised to a CSE use { // So, we will abandon all CSE promotions for this candidate // // We use the marker value of NoVN to indicate that we // should abandon this CSE candidate // desc->defExcSetPromise = ValueNumStore::NoVN; tree->gtCSEnum = NO_CSE; JITDUMP(" Abandon - CSE candidate has defs with exception sets that do not satisfy " "some CSE use\n"); continue; } } // For shared const CSE we don't set/use the defConservNormVN // if (!Is_Shared_Const_CSE(desc->csdHashKey)) { // Record or update the value of desc->defConservNormVN // ValueNum theConservNormVN = vnStore->VNConservativeNormalValue(tree->gtVNPair); // Is defConservNormVN still set to the uninit marker value of VNForNull() ? if (desc->defConservNormVN == vnStore->VNForNull()) { // This is the first def that we have visited, set defConservNormVN desc->defConservNormVN = theConservNormVN; } else { // Check to see if all defs have the same conservative normal VN if (theConservNormVN != desc->defConservNormVN) { // This candidate has defs with differing conservative normal VNs, mark it with NoVN desc->defConservNormVN = ValueNumStore::NoVN; // record the marker for differing VNs } } } // If we get here we have accepted this node as a valid CSE def desc->csdDefCount += 1; desc->csdDefWtCnt += stmw; // Mark the node as a CSE definition tree->gtCSEnum = TO_CSE_DEF(tree->gtCSEnum); // This CSE becomes available after this def BitVecOps::AddElemD(cseLivenessTraits, available_cses, cseAvailBit); BitVecOps::AddElemD(cseLivenessTraits, available_cses, cseAvailCrossCallBit); } else // We are visiting a CSE use { assert(isUse); // If the CSE use has no requirements for an exception set then we don't have to do anything // here // if (theLiberalExcSet != vnStore->VNForEmptyExcSet()) { // Are we visiting a use first, before visiting any defs of this CSE? // This is an atypical case that can occur with a bottom tested loop. 
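                        // (i.e. the block containing this use appears earlier in our forward walk
                        //  over the blocks than any block containing a def of this CSE)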
// // Is defExcSetCurrent still set to the uninit marker value of VNForNull() ? if (desc->defExcSetCurrent == vnStore->VNForNull()) { // Update defExcSetPromise, this is our required exception set for all CSE defs // that we encounter later. // // We could see multiple uses before a def, so we require the Union of all exception // sets // desc->defExcSetPromise = vnStore->VNExcSetUnion(desc->defExcSetPromise, theLiberalExcSet); } else // we have already seen a def for this CSE and defExcSetCurrent is setup { if (vnStore->VNExcIsSubset(desc->defExcSetCurrent, theLiberalExcSet)) { // The current set of exceptions produced by all CSE defs have (that we have // visited so far) meets our requirement // // Add any exception items to the defExcSetPromise set // desc->defExcSetPromise = vnStore->VNExcSetUnion(desc->defExcSetPromise, theLiberalExcSet); } } // At this point defExcSetPromise contains all of the exception items that we can promise // here. // if (!vnStore->VNExcIsSubset(desc->defExcSetPromise, theLiberalExcSet)) { // We can't safely make this into a CSE use, because this // CSE use has an exception set item that is not promised // by all of our CSE defs. // // We will omit this CSE use from the graph and proceed, // the other uses and defs can still participate in the CSE optimization. // So this can't be a CSE use tree->gtCSEnum = NO_CSE; JITDUMP(" NO_CSE - This use has an exception set item that isn't contained in the " "defs!\n"); continue; } } // When we get here we have accepted this node as a valid CSE use desc->csdUseCount += 1; desc->csdUseWtCnt += stmw; } } // In order to determine if a CSE is live across a call, we model availablity using two bits and // kill all of the cseAvailCrossCallBit for each CSE whenever we see a GT_CALL (unless the call // generates a CSE). // if (tree->OperGet() == GT_CALL) { // Check for the common case of an already empty available_cses set // and thus nothing needs to be killed // if (!(BitVecOps::IsEmpty(cseLivenessTraits, available_cses))) { if (isUse) { // For a CSE Use we will assume that the CSE logic will replace it with a CSE LclVar and // not make the call so kill nothing } else { // partially kill any cse's that are currently alive (using the cseCallKillsMask set) // BitVecOps::IntersectionD(cseLivenessTraits, available_cses, cseCallKillsMask); if (isDef) { // We can have a GT_CALL that produces a CSE, // (i.e. 
HELPER.CORINFO_HELP_GETSHARED_*STATIC_BASE or // CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE) // // The CSE becomes available after the call, so set the cseAvailCrossCallBit bit in // available_cses // unsigned CSEnum = GET_CSE_INDEX(tree->gtCSEnum); unsigned cseAvailCrossCallBit = getCSEAvailCrossCallBit(CSEnum); BitVecOps::AddElemD(cseLivenessTraits, available_cses, cseAvailCrossCallBit); } } } } } } } } // The following class handles the CSE heuristics // we use a complex set of heuristic rules // to determine if it is likely to be profitable to perform this CSE // class CSE_Heuristic { Compiler* m_pCompiler; unsigned m_addCSEcount; weight_t aggressiveRefCnt; weight_t moderateRefCnt; unsigned enregCount; // count of the number of predicted enregistered variables bool largeFrame; bool hugeFrame; Compiler::codeOptimize codeOptKind; Compiler::CSEdsc** sortTab; size_t sortSiz; #ifdef DEBUG CLRRandom m_cseRNG; unsigned m_bias; #endif public: CSE_Heuristic(Compiler* pCompiler) : m_pCompiler(pCompiler) { codeOptKind = m_pCompiler->compCodeOpt(); } Compiler::codeOptimize CodeOptKind() { return codeOptKind; } // Perform the Initialization step for our CSE Heuristics // determine the various cut off values to use for // the aggressive, moderate and conservative CSE promotions // count the number of enregisterable variables // determine if the method has a large or huge stack frame. // void Initialize() { m_addCSEcount = 0; /* Count of the number of LclVars for CSEs that we added */ // Record the weighted ref count of the last "for sure" callee saved LclVar aggressiveRefCnt = 0; moderateRefCnt = 0; enregCount = 0; largeFrame = false; hugeFrame = false; sortTab = nullptr; sortSiz = 0; unsigned frameSize = 0; unsigned regAvailEstimate = ((CNT_CALLEE_ENREG * 3) + (CNT_CALLEE_TRASH * 2) + 1); unsigned lclNum; LclVarDsc* varDsc; for (lclNum = 0, varDsc = m_pCompiler->lvaTable; lclNum < m_pCompiler->lvaCount; lclNum++, varDsc++) { // Locals with no references don't use any local stack frame slots if (varDsc->lvRefCnt() == 0) { continue; } // Incoming stack arguments don't use any local stack frame slots if (varDsc->lvIsParam && !varDsc->lvIsRegArg) { continue; } #if FEATURE_FIXED_OUT_ARGS // Skip the OutgoingArgArea in computing frame size, since // its size is not yet known and it doesn't affect local // offsets from the frame pointer (though it may affect // them from the stack pointer). noway_assert(m_pCompiler->lvaOutgoingArgSpaceVar != BAD_VAR_NUM); if (lclNum == m_pCompiler->lvaOutgoingArgSpaceVar) { continue; } #endif // FEATURE_FIXED_OUT_ARGS bool onStack = (regAvailEstimate == 0); // true when it is likely that this LclVar will have a stack home // Some LclVars always have stack homes if ((varDsc->lvDoNotEnregister) || (varDsc->lvType == TYP_LCLBLK)) { onStack = true; } #ifdef TARGET_X86 // Treat floating point and 64 bit integers as always on the stack if (varTypeIsFloating(varDsc->TypeGet()) || varTypeIsLong(varDsc->TypeGet())) onStack = true; #endif if (onStack) { frameSize += m_pCompiler->lvaLclSize(lclNum); } else { // For the purposes of estimating the frameSize we // will consider this LclVar as being enregistered. // Now we reduce the remaining regAvailEstimate by // an appropriate amount. 
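                // (regAvailEstimate is only a rough proxy for register pressure: it starts at about
                //  3x CNT_CALLEE_ENREG plus 2x CNT_CALLEE_TRASH and is decremented for each LclVar
                //  we expect to enregister; once it reaches zero we assume later locals get stack homes.)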
// if (varDsc->lvRefCnt() <= 2) { // a single use single def LclVar only uses 1 regAvailEstimate -= 1; } else { // a LclVar with multiple uses and defs uses 2 if (regAvailEstimate >= 2) { regAvailEstimate -= 2; } else { // Don't try to subtract when regAvailEstimate is 1 regAvailEstimate = 0; } } } #ifdef TARGET_XARCH if (frameSize > 0x080) { // We likely have a large stack frame. // // On XARCH stack frame displacements can either use a 1-byte or a 4-byte displacement // with a large franme we will need to use some 4-byte displacements. // largeFrame = true; break; // early out, we don't need to keep increasing frameSize } #elif defined(TARGET_ARM) if (frameSize > 0x0400) { // We likely have a large stack frame. // // Thus we might need to use large displacements when loading or storing // to CSE LclVars that are not enregistered // On ARM32 this means using rsGetRsvdReg() to hold the large displacement largeFrame = true; } if (frameSize > 0x10000) { hugeFrame = true; break; // early out, we don't need to keep increasing frameSize } #elif defined(TARGET_ARM64) if (frameSize > 0x1000) { // We likely have a large stack frame. // // Thus we might need to use large displacements when loading or storing // to CSE LclVars that are not enregistered // On ARM64 this means using rsGetRsvdReg() to hold the large displacement // largeFrame = true; break; // early out, we don't need to keep increasing frameSize } #endif } // Iterate over the sorted list of tracked local variables // these are the register candidates for LSRA // We normally vist the LclVar in order of their weighted ref counts // and our hueristic assumes that the highest weighted ref count // LclVars will be enregistered and that the lowest weighted ref count // are likely be allocated in the stack frame. // The value of enregCount is incremented when we visit a LclVar // that can be enregistered. // for (unsigned trackedIndex = 0; trackedIndex < m_pCompiler->lvaTrackedCount; trackedIndex++) { LclVarDsc* varDsc = m_pCompiler->lvaGetDescByTrackedIndex(trackedIndex); var_types varTyp = varDsc->TypeGet(); // Locals with no references aren't enregistered if (varDsc->lvRefCnt() == 0) { continue; } // Some LclVars always have stack homes if ((varDsc->lvDoNotEnregister) || (varDsc->lvType == TYP_LCLBLK)) { continue; } // The enregCount only tracks the uses of integer registers // // We could track floating point register usage seperately // but it isn't worth the additional complexity as floating point CSEs // are rare and we typically have plenty of floating point register available. // if (!varTypeIsFloating(varTyp)) { enregCount++; // The primitive types, including TYP_SIMD types use one register #ifndef TARGET_64BIT if (varTyp == TYP_LONG) { enregCount++; // on 32-bit targets longs use two registers } #endif } // Set the cut off values to use for deciding when we want to use aggressive, moderate or conservative // // The value of aggressiveRefCnt and moderateRefCnt start off as zero and // when enregCount reached a certain value we assign the current LclVar // (weighted) ref count to aggressiveRefCnt or moderateRefCnt. 
// const unsigned aggressiveEnregNum = (CNT_CALLEE_ENREG * 3 / 2); const unsigned moderateEnregNum = ((CNT_CALLEE_ENREG * 3) + (CNT_CALLEE_TRASH * 2)); // // On Windows x64 this yeilds: // aggressiveEnregNum == 12 and moderateEnregNum == 38 // Thus we will typically set the cutoff values for // aggressiveRefCnt based upon the weight of T13 (the 13th tracked LclVar) // moderateRefCnt based upon the weight of T39 (the 39th tracked LclVar) // // For other architecture and platforms these values dynamically change // based upon the number of callee saved and callee scratch registers. // if ((aggressiveRefCnt == 0) && (enregCount > aggressiveEnregNum)) { if (CodeOptKind() == Compiler::SMALL_CODE) { aggressiveRefCnt = varDsc->lvRefCnt(); } else { aggressiveRefCnt = varDsc->lvRefCntWtd(); } aggressiveRefCnt += BB_UNITY_WEIGHT; } if ((moderateRefCnt == 0) && (enregCount > ((CNT_CALLEE_ENREG * 3) + (CNT_CALLEE_TRASH * 2)))) { if (CodeOptKind() == Compiler::SMALL_CODE) { moderateRefCnt = varDsc->lvRefCnt(); } else { moderateRefCnt = varDsc->lvRefCntWtd(); } moderateRefCnt += (BB_UNITY_WEIGHT / 2); } } // The minumum value that we want to use for aggressiveRefCnt is BB_UNITY_WEIGHT * 2 // so increase it when we are below that value // aggressiveRefCnt = max(BB_UNITY_WEIGHT * 2, aggressiveRefCnt); // The minumum value that we want to use for moderateRefCnt is BB_UNITY_WEIGHT // so increase it when we are below that value // moderateRefCnt = max(BB_UNITY_WEIGHT, moderateRefCnt); #ifdef DEBUG if (m_pCompiler->verbose) { printf("\n"); printf("Aggressive CSE Promotion cutoff is %f\n", aggressiveRefCnt); printf("Moderate CSE Promotion cutoff is %f\n", moderateRefCnt); printf("enregCount is %u\n", enregCount); printf("Framesize estimate is 0x%04X\n", frameSize); printf("We have a %s frame\n", hugeFrame ? "huge" : (largeFrame ? "large" : "small")); } #endif } void SortCandidates() { /* Create an expression table sorted by decreasing cost */ sortTab = new (m_pCompiler, CMK_CSE) Compiler::CSEdsc*[m_pCompiler->optCSECandidateCount]; sortSiz = m_pCompiler->optCSECandidateCount * sizeof(*sortTab); memcpy(sortTab, m_pCompiler->optCSEtab, sortSiz); if (CodeOptKind() == Compiler::SMALL_CODE) { jitstd::sort(sortTab, sortTab + m_pCompiler->optCSECandidateCount, Compiler::optCSEcostCmpSz()); } else { jitstd::sort(sortTab, sortTab + m_pCompiler->optCSECandidateCount, Compiler::optCSEcostCmpEx()); } #ifdef DEBUG if (m_pCompiler->verbose) { printf("\nSorted CSE candidates:\n"); /* Print out the CSE candidates */ for (unsigned cnt = 0; cnt < m_pCompiler->optCSECandidateCount; cnt++) { Compiler::CSEdsc* dsc = sortTab[cnt]; GenTree* expr = dsc->csdTree; weight_t def; weight_t use; unsigned cost; if (CodeOptKind() == Compiler::SMALL_CODE) { def = dsc->csdDefCount; // def count use = dsc->csdUseCount; // use count (excluding the implicit uses at defs) cost = dsc->csdTree->GetCostSz(); } else { def = dsc->csdDefWtCnt; // weighted def count use = dsc->csdUseWtCnt; // weighted use count (excluding the implicit uses at defs) cost = dsc->csdTree->GetCostEx(); } if (!Compiler::Is_Shared_Const_CSE(dsc->csdHashKey)) { printf(FMT_CSE ", {$%-3x, $%-3x} useCnt=%d: [def=%3f, use=%3f, cost=%3u%s]\n :: ", dsc->csdIndex, dsc->csdHashKey, dsc->defExcSetPromise, dsc->csdUseCount, def, use, cost, dsc->csdLiveAcrossCall ? 
", call" : " "); } else { size_t kVal = Compiler::Decode_Shared_Const_CSE_Value(dsc->csdHashKey); printf(FMT_CSE ", {K_%p} useCnt=%d: [def=%3f, use=%3f, cost=%3u%s]\n :: ", dsc->csdIndex, dspPtr(kVal), dsc->csdUseCount, def, use, cost, dsc->csdLiveAcrossCall ? ", call" : " "); } m_pCompiler->gtDispTree(expr, nullptr, nullptr, true); } printf("\n"); } #endif // DEBUG } // The following class nested within CSE_Heuristic encapsulates the information // about the current CSE candidate that is under consideration // // TODO-Cleanup: This is still very much based upon the old Lexical CSE implementation // and needs to be reworked for the Value Number based implementation // class CSE_Candidate { CSE_Heuristic* m_context; Compiler::CSEdsc* m_CseDsc; unsigned m_cseIndex; weight_t m_defCount; weight_t m_useCount; unsigned m_Cost; unsigned m_Size; // When this Candidate is successfully promoted to a CSE we record // the following information about what category was used when promoting it. // // We will set m_Aggressive: // When we believe that the CSE very valuable in terms of weighted ref counts, // such that it would always be enregistered by the register allocator. // // We will set m_Moderate: // When we believe that the CSE is moderately valuable in terms of weighted ref counts, // such that it is more likely than not to be enregistered by the register allocator // // We will set m_Conservative: // When we didn't set m_Aggressive or m_Moderate. // Such candidates typically are expensive to compute and thus are // always profitable to promote even when they aren't enregistered. // // We will set m_StressCSE: // When the candidate is only being promoted because of a Stress mode. // bool m_Aggressive; bool m_Moderate; bool m_Conservative; bool m_StressCSE; public: CSE_Candidate(CSE_Heuristic* context, Compiler::CSEdsc* cseDsc) : m_context(context) , m_CseDsc(cseDsc) , m_cseIndex(m_CseDsc->csdIndex) , m_defCount(0) , m_useCount(0) , m_Cost(0) , m_Size(0) , m_Aggressive(false) , m_Moderate(false) , m_Conservative(false) , m_StressCSE(false) { } Compiler::CSEdsc* CseDsc() { return m_CseDsc; } unsigned CseIndex() { return m_cseIndex; } weight_t DefCount() { return m_defCount; } weight_t UseCount() { return m_useCount; } // TODO-CQ: With ValNum CSE's the Expr and its cost can vary. 
GenTree* Expr() { return m_CseDsc->csdTree; } unsigned Cost() { return m_Cost; } unsigned Size() { return m_Size; } bool IsSharedConst() { return m_CseDsc->csdIsSharedConst; } bool LiveAcrossCall() { return m_CseDsc->csdLiveAcrossCall; } void SetAggressive() { m_Aggressive = true; } bool IsAggressive() { return m_Aggressive; } void SetModerate() { m_Moderate = true; } bool IsModerate() { return m_Moderate; } void SetConservative() { m_Conservative = true; } bool IsConservative() { return m_Conservative; } void SetStressCSE() { m_StressCSE = true; } bool IsStressCSE() { return m_StressCSE; } void InitializeCounts() { m_Size = Expr()->GetCostSz(); // always the GetCostSz() if (m_context->CodeOptKind() == Compiler::SMALL_CODE) { m_Cost = m_Size; // the estimated code size m_defCount = m_CseDsc->csdDefCount; // def count m_useCount = m_CseDsc->csdUseCount; // use count (excluding the implicit uses at defs) } else { m_Cost = Expr()->GetCostEx(); // the estimated execution cost m_defCount = m_CseDsc->csdDefWtCnt; // weighted def count m_useCount = m_CseDsc->csdUseWtCnt; // weighted use count (excluding the implicit uses at defs) } } }; #ifdef DEBUG //------------------------------------------------------------------------ // optConfigBiasedCSE: // Stress mode to shuffle the decision to CSE or not using environment // variable COMPlus_JitStressBiasedCSE (= 0 to 100%). When the bias value // is not specified but COMPlus_JitStress is ON, generate a random bias. // // Return Value: // 0 -- This method is indifferent about this CSE (no bias specified and no stress) // 1 -- This CSE must be performed to maintain specified/generated bias. // -1 -- This CSE mustn't be performed to maintain specified/generated bias. // // Operation: // A debug stress only method that returns "1" with probability (P) // defined by: // // P = (COMPlus_JitStressBiasedCSE / 100) (or) // P = (random(100) / 100) when COMPlus_JitStress is specified and // COMPlus_JitStressBiasedCSE is unspecified. // // When specified, the bias is reinterpreted as a decimal number between 0 // to 100. // When bias is not specified, a bias is randomly generated if COMPlus_JitStress // is non-zero. // // Callers are supposed to call this method for each CSE promotion decision // and ignore the call if return value is 0 and honor the 1 with a CSE and // -1 with a no-CSE to maintain the specified/generated bias. // int optConfigBiasedCSE() { // Seed the PRNG, if never done before. if (!m_cseRNG.IsInitialized()) { m_cseRNG.Init(m_pCompiler->info.compMethodHash()); m_bias = m_cseRNG.Next(100); } // Obtain the bias value and reinterpret as decimal. unsigned bias = ReinterpretHexAsDecimal(JitConfig.JitStressBiasedCSE()); // Invalid value, check if JitStress is ON. if (bias > 100) { if (!m_pCompiler->compStressCompile(Compiler::STRESS_MAKE_CSE, MAX_STRESS_WEIGHT)) { // JitStress is OFF for CSE, nothing to do. return 0; } bias = m_bias; JITDUMP("JitStressBiasedCSE is OFF, but JitStress is ON: generated bias=%d.\n", bias); } // Generate a number between (0, 99) and if the generated // number is smaller than bias, then perform CSE. unsigned gen = m_cseRNG.Next(100); int ret = (gen < bias) ? 1 : -1; if (m_pCompiler->verbose) { if (ret < 0) { printf("No CSE because gen=%d >= bias=%d\n", gen, bias); } else { printf("Promoting CSE because gen=%d < bias=%d\n", gen, bias); } } // Indicate whether to perform CSE or not. 
return ret; } #endif // Given a CSE candidate decide whether it passes or fails the profitability heuristic // return true if we believe that it is profitable to promote this candidate to a CSE // bool PromotionCheck(CSE_Candidate* candidate) { bool result = false; #ifdef DEBUG int stressResult = optConfigBiasedCSE(); if (stressResult != 0) { // Stress is enabled. Check whether to perform CSE or not. if (stressResult > 0) { candidate->SetStressCSE(); return true; } } if (m_pCompiler->optConfigDisableCSE2()) { return false; // skip this CSE } #endif /* Our calculation is based on the following cost estimate formula Existing costs are: (def + use) * cost If we introduce a CSE temp are each definition and replace the use with a CSE temp then our cost is: (def * (cost + cse-def-cost)) + (use * cse-use-cost) We must estimate the values to use for cse-def-cost and cse-use-cost If we are able to enregister the CSE then the cse-use-cost is one and cse-def-cost is either zero or one. Zero in the case where we needed to evaluate the def into a register and we can use that register as the CSE temp as well. If we are unable to enregister the CSE then the cse-use-cost is IND_COST and the cse-def-cost is also IND_COST. If we want to be conservative we use IND_COST as the the value for both cse-def-cost and cse-use-cost and then we never introduce a CSE that could pessimize the execution time of the method. If we want to be more moderate we use (IND_COST_EX + 1) / 2 as the values for both cse-def-cost and cse-use-cost. If we want to be aggressive we use 1 as the values for both cse-def-cost and cse-use-cost. If we believe that the CSE very valuable in terms of weighted ref counts such that it would always be enregistered by the register allocator we choose the aggressive use def costs. If we believe that the CSE is somewhat valuable in terms of weighted ref counts such that it could be likely be enregistered by the register allocator we choose the moderate use def costs. otherwise we choose the conservative use def costs. */ unsigned cse_def_cost; unsigned cse_use_cost; weight_t no_cse_cost = 0; weight_t yes_cse_cost = 0; unsigned extra_yes_cost = 0; unsigned extra_no_cost = 0; // The 'cseRefCnt' is the RefCnt that we will have if we promote this CSE into a new LclVar // Each CSE Def will contain two Refs and each CSE Use will have one Ref of this new LclVar weight_t cseRefCnt = (candidate->DefCount() * 2) + candidate->UseCount(); bool canEnregister = true; unsigned slotCount = 1; if (candidate->Expr()->TypeIs(TYP_STRUCT)) { // This is a non-enregisterable struct. canEnregister = false; CORINFO_CLASS_HANDLE structHnd = m_pCompiler->gtGetStructHandleIfPresent(candidate->Expr()); if (structHnd == NO_CLASS_HANDLE) { JITDUMP("Can't determine the struct size, so we can't consider it for CSE promotion\n"); return false; // Do not make this a CSE } unsigned size = m_pCompiler->info.compCompHnd->getClassSize(structHnd); // Note that the slotCount is used to estimate the reference cost, but it may overestimate this // because it doesn't take into account that we might use a vector register for struct copies. slotCount = (size + TARGET_POINTER_SIZE - 1) / TARGET_POINTER_SIZE; } if (CodeOptKind() == Compiler::SMALL_CODE) { // Note that when optimizing for SMALL_CODE we set the cse_def_cost/cse_use_cost based // upon the code size and we use unweighted ref counts instead of weighted ref counts. 
// Also note that optimizing for SMALL_CODE is rare, we typically only optimize this way // for class constructors, because we know that they will only run once. // if (cseRefCnt >= aggressiveRefCnt) { // Record that we are choosing to use the aggressive promotion rules // candidate->SetAggressive(); #ifdef DEBUG if (m_pCompiler->verbose) { printf("Aggressive CSE Promotion (%f >= %f)\n", cseRefCnt, aggressiveRefCnt); } #endif // With aggressive promotion we expect that the candidate will be enregistered // so we set the use and def costs to their miniumum values // cse_def_cost = 1; cse_use_cost = 1; // Check if this candidate is likely to live on the stack // if (candidate->LiveAcrossCall() || !canEnregister) { // Increase the costs when we have a large or huge frame // if (largeFrame) { cse_def_cost++; cse_use_cost++; } if (hugeFrame) { cse_def_cost++; cse_use_cost++; } } } else // not aggressiveRefCnt { // Record that we are choosing to use the conservative promotion rules // candidate->SetConservative(); if (largeFrame) { #ifdef DEBUG if (m_pCompiler->verbose) { printf("Codesize CSE Promotion (%s frame)\n", hugeFrame ? "huge" : "large"); } #endif #ifdef TARGET_XARCH /* The following formula is good choice when optimizing CSE for SMALL_CODE */ cse_def_cost = 6; // mov [EBP-0x00001FC],reg cse_use_cost = 5; // [EBP-0x00001FC] #else // TARGET_ARM if (hugeFrame) { cse_def_cost = 10 + 2; // movw/movt r10 and str reg,[sp+r10] cse_use_cost = 10 + 2; } else { cse_def_cost = 6 + 2; // movw r10 and str reg,[sp+r10] cse_use_cost = 6 + 2; } #endif } else // small frame { #ifdef DEBUG if (m_pCompiler->verbose) { printf("Codesize CSE Promotion (small frame)\n"); } #endif #ifdef TARGET_XARCH /* The following formula is good choice when optimizing CSE for SMALL_CODE */ cse_def_cost = 3; // mov [EBP-1C],reg cse_use_cost = 2; // [EBP-1C] #else // TARGET_ARM cse_def_cost = 2; // str reg,[sp+0x9c] cse_use_cost = 2; // ldr reg,[sp+0x9c] #endif } } #ifdef TARGET_AMD64 if (varTypeIsFloating(candidate->Expr()->TypeGet())) { // floating point loads/store encode larger cse_def_cost += 2; cse_use_cost += 1; } #endif // TARGET_AMD64 } else // not SMALL_CODE ... { // Note that when optimizing for BLENDED_CODE or FAST_CODE we set cse_def_cost/cse_use_cost // based upon the execution costs of the code and we use weighted ref counts. // if ((cseRefCnt >= aggressiveRefCnt) && canEnregister) { // Record that we are choosing to use the aggressive promotion rules // candidate->SetAggressive(); #ifdef DEBUG if (m_pCompiler->verbose) { printf("Aggressive CSE Promotion (%f >= %f)\n", cseRefCnt, aggressiveRefCnt); } #endif // With aggressive promotion we expect that the candidate will be enregistered // so we set the use and def costs to their miniumum values // cse_def_cost = 1; cse_use_cost = 1; } else if (cseRefCnt >= moderateRefCnt) { // Record that we are choosing to use the moderate promotion rules // candidate->SetModerate(); if (!candidate->LiveAcrossCall() && canEnregister) { #ifdef DEBUG if (m_pCompiler->verbose) { printf("Moderate CSE Promotion (CSE never live at call) (%f >= %f)\n", cseRefCnt, moderateRefCnt); } #endif cse_def_cost = 2; cse_use_cost = 1; } else // candidate is live across call or not enregisterable. { #ifdef DEBUG if (m_pCompiler->verbose) { printf("Moderate CSE Promotion (%s) (%f >= %f)\n", candidate->LiveAcrossCall() ? 
"CSE is live across a call" : "not enregisterable", cseRefCnt, moderateRefCnt); } #endif cse_def_cost = 2; if (canEnregister) { if (enregCount < (CNT_CALLEE_ENREG * 3 / 2)) { cse_use_cost = 1; } else { cse_use_cost = 2; } } else { cse_use_cost = 3; } } } else // Conservative CSE promotion { // Record that we are choosing to use the conservative promotion rules // candidate->SetConservative(); if (!candidate->LiveAcrossCall() && canEnregister) { #ifdef DEBUG if (m_pCompiler->verbose) { printf("Conservative CSE Promotion (%s) (%f < %f)\n", candidate->LiveAcrossCall() ? "CSE is live across a call" : "not enregisterable", cseRefCnt, moderateRefCnt); } #endif cse_def_cost = 2; cse_use_cost = 2; } else // candidate is live across call { #ifdef DEBUG if (m_pCompiler->verbose) { printf("Conservative CSE Promotion (%f < %f)\n", cseRefCnt, moderateRefCnt); } #endif cse_def_cost = 2; cse_use_cost = 3; } // If we have maxed out lvaTrackedCount then this CSE may end up as an untracked variable if (m_pCompiler->lvaTrackedCount == (unsigned)JitConfig.JitMaxLocalsToTrack()) { cse_def_cost += 1; cse_use_cost += 1; } } } if (slotCount > 1) { cse_def_cost *= slotCount; cse_use_cost *= slotCount; } // If this CSE is live across a call then we may have additional costs // if (candidate->LiveAcrossCall()) { // If we have a floating-point CSE that is both live across a call and there // are no callee-saved FP registers available, the RA will have to spill at // the def site and reload at the (first) use site, if the variable is a register // candidate. Account for that. if (varTypeIsFloating(candidate->Expr()) && (CNT_CALLEE_SAVED_FLOAT == 0) && !candidate->IsConservative()) { cse_def_cost += 1; cse_use_cost += 1; } // If we don't have a lot of variables to enregister or we have a floating point type // then we will likely need to spill an additional caller save register. // if ((enregCount < (CNT_CALLEE_ENREG * 3 / 2)) || varTypeIsFloating(candidate->Expr())) { // Extra cost in case we have to spill/restore a caller saved register extra_yes_cost = BB_UNITY_WEIGHT_UNSIGNED; if (cseRefCnt < moderateRefCnt) // If Conservative CSE promotion { extra_yes_cost *= 2; // full cost if we are being Conservative } } #ifdef FEATURE_SIMD // SIMD types may cause a SIMD register to be spilled/restored in the prolog and epilog. // if (varTypeIsSIMD(candidate->Expr()->TypeGet())) { // We don't have complete information about when these extra spilled/restore will be needed. // Instead we are conservative and assume that each SIMD CSE that is live across a call // will cause an additional spill/restore in the prolog and epilog. // int spillSimdRegInProlog = 1; // If we have a SIMD32 that is live across a call we have even higher spill costs // if (candidate->Expr()->TypeGet() == TYP_SIMD32) { // Additionally for a simd32 CSE candidate we assume that and second spilled/restore will be needed. // (to hold the upper half of the simd32 register that isn't preserved across the call) // spillSimdRegInProlog++; // We also increase the CSE use cost here to because we may have to generate instructions // to move the upper half of the simd32 before and after a call. 
// cse_use_cost += 2; } extra_yes_cost = (BB_UNITY_WEIGHT_UNSIGNED * spillSimdRegInProlog) * 3; } #endif // FEATURE_SIMD } // estimate the cost from lost codesize reduction if we do not perform the CSE if (candidate->Size() > cse_use_cost) { Compiler::CSEdsc* dsc = candidate->CseDsc(); // We need to retrieve the actual use count, not the // weighted count extra_no_cost = candidate->Size() - cse_use_cost; extra_no_cost = extra_no_cost * dsc->csdUseCount * 2; } /* no_cse_cost is the cost estimate when we decide not to make a CSE */ /* yes_cse_cost is the cost estimate when we decide to make a CSE */ no_cse_cost = candidate->UseCount() * candidate->Cost(); yes_cse_cost = (candidate->DefCount() * cse_def_cost) + (candidate->UseCount() * cse_use_cost); no_cse_cost += extra_no_cost; yes_cse_cost += extra_yes_cost; #ifdef DEBUG if (m_pCompiler->verbose) { printf("cseRefCnt=%f, aggressiveRefCnt=%f, moderateRefCnt=%f\n", cseRefCnt, aggressiveRefCnt, moderateRefCnt); printf("defCnt=%f, useCnt=%f, cost=%d, size=%d%s\n", candidate->DefCount(), candidate->UseCount(), candidate->Cost(), candidate->Size(), candidate->LiveAcrossCall() ? ", LiveAcrossCall" : ""); printf("def_cost=%d, use_cost=%d, extra_no_cost=%d, extra_yes_cost=%d\n", cse_def_cost, cse_use_cost, extra_no_cost, extra_yes_cost); printf("CSE cost savings check (%f >= %f) %s\n", no_cse_cost, yes_cse_cost, (no_cse_cost >= yes_cse_cost) ? "passes" : "fails"); } #endif // DEBUG // Should we make this candidate into a CSE? // Is the yes cost less than the no cost // if (yes_cse_cost <= no_cse_cost) { result = true; // Yes make this a CSE } else { /* In stress mode we will make some extra CSEs */ if (no_cse_cost > 0) { int percentage = (int)((no_cse_cost * 100) / yes_cse_cost); if (m_pCompiler->compStressCompile(Compiler::STRESS_MAKE_CSE, percentage)) { result = true; // Yes make this a CSE } } } return result; } // IsCompatibleType() takes two var_types and returns true if they // are compatible types for CSE substitution // bool IsCompatibleType(var_types cseLclVarTyp, var_types expTyp) { // Exact type match is the expected case if (cseLclVarTyp == expTyp) { return true; } // We also allow TYP_BYREF and TYP_I_IMPL as compatible types // if ((cseLclVarTyp == TYP_BYREF) && (expTyp == TYP_I_IMPL)) { return true; } if ((cseLclVarTyp == TYP_I_IMPL) && (expTyp == TYP_BYREF)) { return true; } // Otherwise we have incompatible types return false; } // PerformCSE() takes a successful candidate and performs the appropriate replacements: // // It will replace all of the CSE defs with assignments to a new "cse0" LclVar // and will replace all of the CSE uses with reads of the "cse0" LclVar // // It will also put cse0 into SSA if there is just one def. 
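    //
    // Illustrative sketch (hypothetical IR, for intuition only): if "a->b + c" was chosen as a CSE,
    // each def site conceptually becomes "cse0 = a->b + c" (the assignment wrapped together with a
    // read of cse0), and each use site is rewritten to read cse0 instead of re-evaluating "a->b + c".
    //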
void PerformCSE(CSE_Candidate* successfulCandidate) { weight_t cseRefCnt = (successfulCandidate->DefCount() * 2) + successfulCandidate->UseCount(); if (successfulCandidate->LiveAcrossCall() != 0) { // As we introduce new LclVars for these CSE we slightly // increase the cutoffs for aggressive and moderate CSE's // weight_t incr = BB_UNITY_WEIGHT; if (cseRefCnt > aggressiveRefCnt) { aggressiveRefCnt += incr; } if (cseRefCnt > moderateRefCnt) { moderateRefCnt += (incr / 2); } } #ifdef DEBUG // Setup the message arg for lvaGrabTemp() // const char* grabTempMessage = "CSE - unknown"; if (successfulCandidate->IsAggressive()) { grabTempMessage = "CSE - aggressive"; } else if (successfulCandidate->IsModerate()) { grabTempMessage = "CSE - moderate"; } else if (successfulCandidate->IsConservative()) { grabTempMessage = "CSE - conservative"; } else if (successfulCandidate->IsStressCSE()) { grabTempMessage = "CSE - stress mode"; } #endif // DEBUG /* Introduce a new temp for the CSE */ // we will create a long lifetime temp for the new CSE LclVar unsigned cseLclVarNum = m_pCompiler->lvaGrabTemp(false DEBUGARG(grabTempMessage)); var_types cseLclVarTyp = genActualType(successfulCandidate->Expr()->TypeGet()); if (varTypeIsStruct(cseLclVarTyp)) { // Retrieve the struct handle that we recorded while bulding the list of CSE candidates. // If all occurrences were in GT_IND nodes it could still be NO_CLASS_HANDLE // CORINFO_CLASS_HANDLE structHnd = successfulCandidate->CseDsc()->csdStructHnd; if (structHnd == NO_CLASS_HANDLE) { assert(varTypeIsSIMD(cseLclVarTyp)); // We are not setting it for `SIMD* indir` during the first path // because it is not precise, see `optValnumCSE_Index`. structHnd = m_pCompiler->gtGetStructHandle(successfulCandidate->CseDsc()->csdTree); } assert(structHnd != NO_CLASS_HANDLE); m_pCompiler->lvaSetStruct(cseLclVarNum, structHnd, false); } m_pCompiler->lvaTable[cseLclVarNum].lvType = cseLclVarTyp; m_pCompiler->lvaTable[cseLclVarNum].lvIsCSE = true; // Record that we created a new LclVar for use as a CSE temp m_addCSEcount++; m_pCompiler->optCSEcount++; // Walk all references to this CSE, adding an assignment // to the CSE temp to all defs and changing all refs to // a simple use of the CSE temp. // // Later we will unmark any nested CSE's for the CSE uses. // Compiler::CSEdsc* dsc = successfulCandidate->CseDsc(); // If there's just a single def for the CSE, we'll put this // CSE into SSA form on the fly. We won't need any PHIs. unsigned cseSsaNum = SsaConfig::RESERVED_SSA_NUM; if (dsc->csdDefCount == 1) { JITDUMP(FMT_CSE " is single-def, so associated CSE temp V%02u will be in SSA\n", dsc->csdIndex, cseLclVarNum); m_pCompiler->lvaTable[cseLclVarNum].lvInSsa = true; // Allocate the ssa num CompAllocator allocator = m_pCompiler->getAllocator(CMK_SSA); cseSsaNum = m_pCompiler->lvaTable[cseLclVarNum].lvPerSsaData.AllocSsaNum(allocator); } // Verify that all of the ValueNumbers in this list are correct as // Morph will change them when it performs a mutating operation. 
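        // For shared-constant CSEs the loop below also selects a "base" constant among the occurrences;
        // e.g. (illustration only) if constants 0x1000, 0x1080 and 0x1100 all map to this CSE and 0x1000
        // is chosen as the base, the other occurrences can later be rematerialized as base + 0x80 and
        // base + 0x100, keeping the deltas small.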
// bool setRefCnt = true; bool allSame = true; bool isSharedConst = successfulCandidate->IsSharedConst(); ValueNum bestVN = ValueNumStore::NoVN; bool bestIsDef = false; ssize_t bestConstValue = 0; Compiler::treeStmtLst* lst = dsc->csdTreeList; while (lst != nullptr) { // Ignore this node if the gtCSEnum value has been cleared if (IS_CSE_INDEX(lst->tslTree->gtCSEnum)) { // We used the liberal Value numbers when building the set of CSE ValueNum currVN = m_pCompiler->vnStore->VNLiberalNormalValue(lst->tslTree->gtVNPair); assert(currVN != ValueNumStore::NoVN); ssize_t curConstValue = isSharedConst ? m_pCompiler->vnStore->CoercedConstantValue<ssize_t>(currVN) : 0; GenTree* exp = lst->tslTree; bool isDef = IS_CSE_DEF(exp->gtCSEnum); if (bestVN == ValueNumStore::NoVN) { // first entry // set bestVN bestVN = currVN; if (isSharedConst) { // set bestConstValue and bestIsDef bestConstValue = curConstValue; bestIsDef = isDef; } } else if (currVN != bestVN) { assert(isSharedConst); // Must be true when we have differing VNs // subsequent entry // clear allSame and check for a lower constant allSame = false; ssize_t diff = curConstValue - bestConstValue; // The ARM addressing modes allow for a subtraction of up to 255 // so we will allow the diff to be up to -255 before replacing a CSE def // This will minimize the number of extra subtract instructions. // if ((bestIsDef && (diff < -255)) || (!bestIsDef && (diff < 0))) { // set new bestVN, bestConstValue and bestIsDef bestVN = currVN; bestConstValue = curConstValue; bestIsDef = isDef; } } BasicBlock* blk = lst->tslBlock; weight_t curWeight = blk->getBBWeight(m_pCompiler); if (setRefCnt) { m_pCompiler->lvaTable[cseLclVarNum].setLvRefCnt(1); m_pCompiler->lvaTable[cseLclVarNum].setLvRefCntWtd(curWeight); setRefCnt = false; } else { m_pCompiler->lvaTable[cseLclVarNum].incRefCnts(curWeight, m_pCompiler); } // A CSE Def references the LclVar twice // if (isDef) { m_pCompiler->lvaTable[cseLclVarNum].incRefCnts(curWeight, m_pCompiler); } } lst = lst->tslNext; } dsc->csdConstDefValue = bestConstValue; dsc->csdConstDefVN = bestVN; #ifdef DEBUG if (m_pCompiler->verbose) { if (!allSame) { if (isSharedConst) { printf("\nWe have shared Const CSE's and selected " FMT_VN " with a value of 0x%p as the base.\n", dsc->csdConstDefVN, dspPtr(dsc->csdConstDefValue)); } else // !isSharedConst { lst = dsc->csdTreeList; GenTree* firstTree = lst->tslTree; printf("In %s, CSE (oper = %s, type = %s) has differing VNs: ", m_pCompiler->info.compFullName, GenTree::OpName(firstTree->OperGet()), varTypeName(firstTree->TypeGet())); while (lst != nullptr) { if (IS_CSE_INDEX(lst->tslTree->gtCSEnum)) { ValueNum currVN = m_pCompiler->vnStore->VNLiberalNormalValue(lst->tslTree->gtVNPair); printf("[%06d](%s " FMT_VN ") ", m_pCompiler->dspTreeID(lst->tslTree), IS_CSE_USE(lst->tslTree->gtCSEnum) ? "use" : "def", currVN); } lst = lst->tslNext; } printf("\n"); } } } #endif // DEBUG // Setup 'lst' to point at the start of this candidate list lst = dsc->csdTreeList; noway_assert(lst); do { /* Process the next node in the list */ GenTree* exp = lst->tslTree; Statement* stmt = lst->tslStmt; BasicBlock* blk = lst->tslBlock; /* Advance to the next node in the list */ lst = lst->tslNext; // We may have cleared this CSE in optValuenumCSE_Availablity // due to different exception sets. 
// // Ignore this node if the gtCSEnum value has been cleared if (!IS_CSE_INDEX(exp->gtCSEnum)) { continue; } // Assert if we used DEBUG_DESTROY_NODE on this CSE exp assert(exp->gtOper != GT_COUNT); /* Make sure we update the weighted ref count correctly */ m_pCompiler->optCSEweight = blk->getBBWeight(m_pCompiler); /* Figure out the actual type of the value */ var_types expTyp = genActualType(exp->TypeGet()); // The cseLclVarType must be a compatible with expTyp // ValueNumStore* vnStore = m_pCompiler->vnStore; noway_assert(IsCompatibleType(cseLclVarTyp, expTyp) || (dsc->csdConstDefVN != vnStore->VNForNull())); // This will contain the replacement tree for exp // It will either be the CSE def or CSE ref // GenTree* cse = nullptr; bool isDef; FieldSeqNode* fldSeq = nullptr; bool commaOnly = true; GenTree* effectiveExp = exp->gtEffectiveVal(commaOnly); const bool hasZeroMapAnnotation = m_pCompiler->GetZeroOffsetFieldMap()->Lookup(effectiveExp, &fldSeq); if (IS_CSE_USE(exp->gtCSEnum)) { /* This is a use of the CSE */ isDef = false; #ifdef DEBUG if (m_pCompiler->verbose) { printf("\nWorking on the replacement of the " FMT_CSE " use at ", exp->gtCSEnum); Compiler::printTreeID(exp); printf(" in " FMT_BB "\n", blk->bbNum); } #endif // DEBUG // We will replace the CSE ref with a new tree // this is typically just a simple use of the new CSE LclVar // // Create a reference to the CSE temp GenTree* cseLclVar = m_pCompiler->gtNewLclvNode(cseLclVarNum, cseLclVarTyp); cseLclVar->gtVNPair.SetBoth(dsc->csdConstDefVN); // Assign the ssa num for the lclvar use. Note it may be the reserved num. cseLclVar->AsLclVarCommon()->SetSsaNum(cseSsaNum); cse = cseLclVar; if (isSharedConst) { ValueNum currVN = m_pCompiler->vnStore->VNLiberalNormalValue(exp->gtVNPair); ssize_t curValue = m_pCompiler->vnStore->CoercedConstantValue<ssize_t>(currVN); ssize_t delta = curValue - dsc->csdConstDefValue; if (delta != 0) { GenTree* deltaNode = m_pCompiler->gtNewIconNode(delta, cseLclVarTyp); cse = m_pCompiler->gtNewOperNode(GT_ADD, cseLclVarTyp, cseLclVar, deltaNode); cse->SetDoNotCSE(); } } // assign the proper ValueNumber, A CSE use discards any exceptions cse->gtVNPair = vnStore->VNPNormalPair(exp->gtVNPair); // shared const CSE has the correct value number assigned // and both liberal and conservative are identical // and they do not use theConservativeVN // if (!isSharedConst) { ValueNum theConservativeVN = successfulCandidate->CseDsc()->defConservNormVN; if (theConservativeVN != ValueNumStore::NoVN) { // All defs of this CSE share the same normal conservative VN, and we are rewriting this // use to fetch the same value with no reload, so we can safely propagate that // conservative VN to this use. This can help range check elimination later on. cse->gtVNPair.SetConservative(theConservativeVN); // If the old VN was flagged as a checked bound, propagate that to the new VN // to make sure assertion prop will pay attention to this VN. ValueNum oldVN = exp->gtVNPair.GetConservative(); if (!vnStore->IsVNConstant(theConservativeVN) && vnStore->IsVNCheckedBound(oldVN)) { vnStore->SetVNIsCheckedBound(theConservativeVN); } GenTree* cmp; if ((m_pCompiler->optCseCheckedBoundMap != nullptr) && (m_pCompiler->optCseCheckedBoundMap->Lookup(exp, &cmp))) { // Propagate the new value number to this compare node as well, since // subsequent range check elimination will try to correlate it with // the other appearances that are getting CSEd. 
ValueNum oldCmpVN = cmp->gtVNPair.GetConservative(); ValueNum newCmpArgVN; ValueNumStore::CompareCheckedBoundArithInfo info; if (vnStore->IsVNCompareCheckedBound(oldCmpVN)) { // Comparison is against the bound directly. newCmpArgVN = theConservativeVN; vnStore->GetCompareCheckedBound(oldCmpVN, &info); } else { // Comparison is against the bound +/- some offset. assert(vnStore->IsVNCompareCheckedBoundArith(oldCmpVN)); vnStore->GetCompareCheckedBoundArithInfo(oldCmpVN, &info); newCmpArgVN = vnStore->VNForFunc(vnStore->TypeOfVN(info.arrOp), (VNFunc)info.arrOper, info.arrOp, theConservativeVN); } ValueNum newCmpVN = vnStore->VNForFunc(vnStore->TypeOfVN(oldCmpVN), (VNFunc)info.cmpOper, info.cmpOp, newCmpArgVN); cmp->gtVNPair.SetConservative(newCmpVN); } } } #ifdef DEBUG cse->gtDebugFlags |= GTF_DEBUG_VAR_CSE_REF; #endif // DEBUG // Now we need to unmark any nested CSE's uses that are found in 'exp' // As well we extract any nested CSE defs that are found in 'exp' and // these are appended to the sideEffList // Afterwards the set of nodes in the 'sideEffectList' are preserved and // all other nodes are removed. // exp->gtCSEnum = NO_CSE; // clear the gtCSEnum field GenTree* sideEffList = nullptr; m_pCompiler->gtExtractSideEffList(exp, &sideEffList, GTF_PERSISTENT_SIDE_EFFECTS | GTF_IS_IN_CSE); // If we have any side effects or extracted CSE defs then we need to create a GT_COMMA tree instead // if (sideEffList != nullptr) { #ifdef DEBUG if (m_pCompiler->verbose) { printf("\nThis CSE use has side effects and/or nested CSE defs. The sideEffectList:\n"); m_pCompiler->gtDispTree(sideEffList); printf("\n"); } #endif GenTree* cseVal = cse; GenTree* curSideEff = sideEffList; ValueNumPair exceptions_vnp = ValueNumStore::VNPForEmptyExcSet(); while ((curSideEff->OperGet() == GT_COMMA) || (curSideEff->OperGet() == GT_ASG)) { GenTree* op1 = curSideEff->AsOp()->gtOp1; GenTree* op2 = curSideEff->AsOp()->gtOp2; ValueNumPair op1vnp; ValueNumPair op1Xvnp = ValueNumStore::VNPForEmptyExcSet(); vnStore->VNPUnpackExc(op1->gtVNPair, &op1vnp, &op1Xvnp); exceptions_vnp = vnStore->VNPExcSetUnion(exceptions_vnp, op1Xvnp); curSideEff = op2; } // We may have inserted a narrowing cast during a previous remorph // and it will not have a value number. if ((curSideEff->OperGet() == GT_CAST) && !curSideEff->gtVNPair.BothDefined()) { // The inserted cast will have no exceptional effects assert(curSideEff->gtOverflow() == false); // Process the exception effects from the cast's operand. 
curSideEff = curSideEff->AsOp()->gtOp1; } ValueNumPair op2vnp; ValueNumPair op2Xvnp = ValueNumStore::VNPForEmptyExcSet(); vnStore->VNPUnpackExc(curSideEff->gtVNPair, &op2vnp, &op2Xvnp); exceptions_vnp = vnStore->VNPExcSetUnion(exceptions_vnp, op2Xvnp); op2Xvnp = ValueNumStore::VNPForEmptyExcSet(); vnStore->VNPUnpackExc(cseVal->gtVNPair, &op2vnp, &op2Xvnp); exceptions_vnp = vnStore->VNPExcSetUnion(exceptions_vnp, op2Xvnp); // Create a comma node with the sideEffList as op1 cse = m_pCompiler->gtNewOperNode(GT_COMMA, expTyp, sideEffList, cseVal); cse->gtVNPair = vnStore->VNPWithExc(op2vnp, exceptions_vnp); } } else { /* This is a def of the CSE */ isDef = true; #ifdef DEBUG if (m_pCompiler->verbose) { printf("\n" FMT_CSE " def at ", GET_CSE_INDEX(exp->gtCSEnum)); Compiler::printTreeID(exp); printf(" replaced in " FMT_BB " with def of V%02u\n", blk->bbNum, cseLclVarNum); } #endif // DEBUG GenTree* val = exp; if (isSharedConst) { ValueNum currVN = m_pCompiler->vnStore->VNLiberalNormalValue(exp->gtVNPair); ssize_t curValue = m_pCompiler->vnStore->CoercedConstantValue<ssize_t>(currVN); ssize_t delta = curValue - dsc->csdConstDefValue; if (delta != 0) { val = m_pCompiler->gtNewIconNode(dsc->csdConstDefValue, cseLclVarTyp); val->gtVNPair.SetBoth(dsc->csdConstDefVN); } } /* Create an assignment of the value to the temp */ GenTree* asg = m_pCompiler->gtNewTempAssign(cseLclVarNum, val); GenTree* origAsg = asg; if (!asg->OperIs(GT_ASG)) { // This can only be the case for a struct in which the 'val' was a COMMA, so // the assignment is sunk below it. asg = asg->gtEffectiveVal(true); noway_assert(origAsg->OperIs(GT_COMMA) && (origAsg == val)); } else { noway_assert(asg->AsOp()->gtOp2 == val); } // assign the proper Value Numbers asg->gtVNPair.SetBoth(ValueNumStore::VNForVoid()); // The GT_ASG node itself is $VN.Void asg->AsOp()->gtOp1->gtVNPair = val->gtVNPair; // The dest op is the same as 'val' noway_assert(asg->AsOp()->gtOp1->gtOper == GT_LCL_VAR); // Backpatch the SSA def, if we're putting this CSE temp into ssa. asg->AsOp()->gtOp1->AsLclVar()->SetSsaNum(cseSsaNum); // Move the information about the CSE def to the assignment; it // now indicates a completed CSE def instead of just a // candidate. optCSE_canSwap uses this information to reason // about evaluation order in between substitutions of CSE // defs/uses. asg->gtCSEnum = exp->gtCSEnum; exp->gtCSEnum = NO_CSE; if (cseSsaNum != SsaConfig::RESERVED_SSA_NUM) { LclSsaVarDsc* ssaVarDsc = m_pCompiler->lvaTable[cseLclVarNum].GetPerSsaData(cseSsaNum); // These should not have been set yet, since this is the first and // only def for this CSE. assert(ssaVarDsc->GetBlock() == nullptr); assert(ssaVarDsc->GetAssignment() == nullptr); ssaVarDsc->m_vnPair = val->gtVNPair; ssaVarDsc->SetBlock(blk); ssaVarDsc->SetAssignment(asg->AsOp()); } /* Create a reference to the CSE temp */ GenTree* cseLclVar = m_pCompiler->gtNewLclvNode(cseLclVarNum, cseLclVarTyp); cseLclVar->gtVNPair.SetBoth(dsc->csdConstDefVN); // Assign the ssa num for the lclvar use. Note it may be the reserved num. 
cseLclVar->AsLclVarCommon()->SetSsaNum(cseSsaNum); GenTree* cseUse = cseLclVar; if (isSharedConst) { ValueNum currVN = m_pCompiler->vnStore->VNLiberalNormalValue(exp->gtVNPair); ssize_t curValue = m_pCompiler->vnStore->CoercedConstantValue<ssize_t>(currVN); ssize_t delta = curValue - dsc->csdConstDefValue; if (delta != 0) { GenTree* deltaNode = m_pCompiler->gtNewIconNode(delta, cseLclVarTyp); cseUse = m_pCompiler->gtNewOperNode(GT_ADD, cseLclVarTyp, cseLclVar, deltaNode); cseUse->SetDoNotCSE(); } } cseUse->gtVNPair = val->gtVNPair; // The 'cseUse' is equal to 'val' /* Create a comma node for the CSE assignment */ cse = m_pCompiler->gtNewOperNode(GT_COMMA, expTyp, origAsg, cseUse); cse->gtVNPair = cseUse->gtVNPair; // The comma's value is the same as 'val' // as the assignment to the CSE LclVar // cannot add any new exceptions } cse->CopyReg(exp); // The cse inheirits any reg num property from the orginal exp node exp->ClearRegNum(); // The exp node (for a CSE def) no longer has a register requirement // Walk the statement 'stmt' and find the pointer // in the tree is pointing to 'exp' // Compiler::FindLinkData linkData = m_pCompiler->gtFindLink(stmt, exp); GenTree** link = linkData.result; #ifdef DEBUG if (link == nullptr) { printf("\ngtFindLink failed: stm="); Compiler::printStmtID(stmt); printf(", exp="); Compiler::printTreeID(exp); printf("\n"); printf("stm ="); m_pCompiler->gtDispStmt(stmt); printf("\n"); printf("exp ="); m_pCompiler->gtDispTree(exp); printf("\n"); } #endif // DEBUG noway_assert(link); // Mutate this link, thus replacing the old exp with the new CSE representation // *link = cse; // If it has a zero-offset field seq, copy annotation. if (hasZeroMapAnnotation) { m_pCompiler->fgAddFieldSeqForZeroOffset(cse, fldSeq); } assert(m_pCompiler->fgRemoveRestOfBlock == false); /* re-morph the statement */ m_pCompiler->fgMorphBlockStmt(blk, stmt DEBUGARG("optValnumCSE")); } while (lst != nullptr); } // Consider each of the CSE candidates and if the CSE passes // the PromotionCheck then transform the CSE by calling PerformCSE // void ConsiderCandidates() { /* Consider each CSE candidate, in order of decreasing cost */ unsigned cnt = m_pCompiler->optCSECandidateCount; Compiler::CSEdsc** ptr = sortTab; for (; (cnt > 0); cnt--, ptr++) { Compiler::CSEdsc* dsc = *ptr; CSE_Candidate candidate(this, dsc); if (dsc->defExcSetPromise == ValueNumStore::NoVN) { JITDUMP("Abandoned " FMT_CSE " because we had defs with different Exc sets\n", candidate.CseIndex()); continue; } if (dsc->csdStructHndMismatch) { JITDUMP("Abandoned " FMT_CSE " because we had mismatching struct handles\n", candidate.CseIndex()); continue; } candidate.InitializeCounts(); if (candidate.UseCount() == 0) { JITDUMP("Skipped " FMT_CSE " because use count is 0\n", candidate.CseIndex()); continue; } #ifdef DEBUG if (m_pCompiler->verbose) { if (!Compiler::Is_Shared_Const_CSE(dsc->csdHashKey)) { printf("\nConsidering " FMT_CSE " {$%-3x, $%-3x} [def=%3f, use=%3f, cost=%3u%s]\n", candidate.CseIndex(), dsc->csdHashKey, dsc->defExcSetPromise, candidate.DefCount(), candidate.UseCount(), candidate.Cost(), dsc->csdLiveAcrossCall ? ", call" : " "); } else { size_t kVal = Compiler::Decode_Shared_Const_CSE_Value(dsc->csdHashKey); printf("\nConsidering " FMT_CSE " {K_%p} [def=%3f, use=%3f, cost=%3u%s]\n", candidate.CseIndex(), dspPtr(kVal), candidate.DefCount(), candidate.UseCount(), candidate.Cost(), dsc->csdLiveAcrossCall ? 
", call" : " "); } printf("CSE Expression : \n"); m_pCompiler->gtDispTree(candidate.Expr()); printf("\n"); } #endif // DEBUG if ((dsc->csdDefCount <= 0) || (dsc->csdUseCount == 0)) { // If we reach this point, then the CSE def was incorrectly marked or the // block with this use is unreachable. So skip and go to the next CSE. // Without the "continue", we'd generate bad code in retail. // Commented out a noway_assert(false) here due to bug: 3290124. // The problem is if there is sub-graph that is not reachable from the // entry point, the CSE flags propagated, would be incorrect for it. continue; } bool doCSE = PromotionCheck(&candidate); #ifdef DEBUG if (m_pCompiler->verbose) { if (doCSE) { printf("\nPromoting CSE:\n"); } else { printf("Did Not promote this CSE\n"); } } #endif // DEBUG if (doCSE) { PerformCSE(&candidate); } } } // Perform the necessary cleanup after our CSE heuristics have run // void Cleanup() { // Nothing to do, currently. } }; /***************************************************************************** * * Routine for performing the Value Number based CSE using our heuristics */ void Compiler::optValnumCSE_Heuristic() { #ifdef DEBUG if (verbose) { printf("\n************ Trees at start of optValnumCSE_Heuristic()\n"); fgDumpTrees(fgFirstBB, nullptr); printf("\n"); } #endif // DEBUG CSE_Heuristic cse_heuristic(this); cse_heuristic.Initialize(); cse_heuristic.SortCandidates(); cse_heuristic.ConsiderCandidates(); cse_heuristic.Cleanup(); } /***************************************************************************** * * Perform common sub-expression elimination. */ void Compiler::optOptimizeValnumCSEs() { #ifdef DEBUG if (optConfigDisableCSE()) { return; // Disabled by JitNoCSE } #endif optValnumCSE_phase = true; optCSEweight = -1.0f; optValnumCSE_Init(); if (optValnumCSE_Locate()) { optValnumCSE_InitDataFlow(); optValnumCSE_DataFlow(); optValnumCSE_Availablity(); optValnumCSE_Heuristic(); } optValnumCSE_phase = false; } /***************************************************************************** * * The following determines whether the given expression is a worthy CSE * candidate. */ bool Compiler::optIsCSEcandidate(GenTree* tree) { /* No good if the expression contains side effects or if it was marked as DONT CSE */ if (tree->gtFlags & (GTF_ASG | GTF_DONT_CSE)) { return false; } var_types type = tree->TypeGet(); genTreeOps oper = tree->OperGet(); if (type == TYP_VOID) { return false; } // If this is a struct type (including SIMD*), we can only consider it for CSE-ing // if we can get its handle, so that we can create a temp. if (varTypeIsStruct(type) && (gtGetStructHandleIfPresent(tree) == NO_CLASS_HANDLE)) { return false; } unsigned cost; if (compCodeOpt() == SMALL_CODE) { cost = tree->GetCostSz(); } else { cost = tree->GetCostEx(); } /* Don't bother if the potential savings are very low */ if (cost < MIN_CSE_COST) { return false; } #if !CSE_CONSTS /* Don't bother with constants */ if (tree->OperIsConst()) { return false; } #endif /* Check for some special cases */ switch (oper) { case GT_CALL: GenTreeCall* call; call = tree->AsCall(); // Don't mark calls to allocation helpers as CSE candidates. // Marking them as CSE candidates usually blocks CSEs rather than enables them. // A typical case is: // [1] GT_IND(x) = GT_CALL ALLOC_HELPER // ... // [2] y = GT_IND(x) // ... 
// [3] z = GT_IND(x) // If we mark CALL ALLOC_HELPER as a CSE candidate, we later discover // that it can't be a CSE def because GT_INDs in [2] and [3] can cause // more exceptions (NullRef) so we abandon this CSE. // If we don't mark CALL ALLOC_HELPER as a CSE candidate, we are able // to use GT_IND(x) in [2] as a CSE def. if ((call->gtCallType == CT_HELPER) && s_helperCallProperties.IsAllocator(eeGetHelperNum(call->gtCallMethHnd))) { return false; } // If we have a simple helper call with no other persistent side-effects // then we allow this tree to be a CSE candidate // if (gtTreeHasSideEffects(tree, GTF_PERSISTENT_SIDE_EFFECTS | GTF_IS_IN_CSE) == false) { return true; } else { // Calls generally cannot be CSE-ed return false; } case GT_IND: // TODO-CQ: Review this... /* We try to cse GT_ARR_ELEM nodes instead of GT_IND(GT_ARR_ELEM). Doing the first allows cse to also kick in for code like "GT_IND(GT_ARR_ELEM) = GT_IND(GT_ARR_ELEM) + xyz", whereas doing the second would not allow it */ return (tree->AsOp()->gtOp1->gtOper != GT_ARR_ELEM); case GT_CNS_LNG: #ifndef TARGET_64BIT return false; // Don't CSE 64-bit constants on 32-bit platforms #endif case GT_CNS_INT: case GT_CNS_DBL: case GT_CNS_STR: return true; // We reach here only when CSE_CONSTS is enabled case GT_ARR_ELEM: case GT_ARR_LENGTH: case GT_CLS_VAR: case GT_LCL_FLD: return true; case GT_LCL_VAR: return false; // Can't CSE a volatile LCL_VAR case GT_NEG: case GT_NOT: case GT_BSWAP: case GT_BSWAP16: case GT_CAST: return true; // CSE these Unary Operators case GT_SUB: case GT_DIV: case GT_MOD: case GT_UDIV: case GT_UMOD: case GT_OR: case GT_AND: case GT_XOR: case GT_RSH: case GT_RSZ: case GT_ROL: case GT_ROR: return true; // CSE these Binary Operators case GT_ADD: // Check for ADDRMODE flag on these Binary Operators case GT_MUL: case GT_LSH: if ((tree->gtFlags & GTF_ADDRMODE_NO_CSE) != 0) { return false; } return true; case GT_EQ: case GT_NE: case GT_LT: case GT_LE: case GT_GE: case GT_GT: return true; // Allow the CSE of Comparison operators #ifdef FEATURE_SIMD case GT_SIMD: return true; // allow SIMD intrinsics to be CSE-ed #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS case GT_HWINTRINSIC: { GenTreeHWIntrinsic* hwIntrinsicNode = tree->AsHWIntrinsic(); assert(hwIntrinsicNode != nullptr); HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(hwIntrinsicNode->GetHWIntrinsicId()); switch (category) { #ifdef TARGET_XARCH case HW_Category_SimpleSIMD: case HW_Category_IMM: case HW_Category_Scalar: case HW_Category_SIMDScalar: case HW_Category_Helper: break; #elif defined(TARGET_ARM64) case HW_Category_SIMD: case HW_Category_SIMDByIndexedElement: case HW_Category_ShiftLeftByImmediate: case HW_Category_ShiftRightByImmediate: case HW_Category_Scalar: case HW_Category_Helper: break; #endif case HW_Category_MemoryLoad: case HW_Category_MemoryStore: case HW_Category_Special: default: return false; } if (hwIntrinsicNode->OperIsMemoryStore()) { // NI_BMI2_MultiplyNoFlags, etc... return false; } if (hwIntrinsicNode->OperIsMemoryLoad()) { // NI_AVX2_BroadcastScalarToVector128, NI_AVX2_GatherVector128, etc... return false; } return true; // allow Hardware Intrinsics to be CSE-ed } #endif // FEATURE_HW_INTRINSICS case GT_INTRINSIC: return true; // allow Intrinsics to be CSE-ed case GT_OBJ: return varTypeIsEnregisterable(type); // Allow enregisterable GT_OBJ's to be CSE-ed. (i.e. SIMD types) case GT_COMMA: return true; // Allow GT_COMMA nodes to be CSE-ed. 
case GT_COLON: case GT_QMARK: case GT_NOP: case GT_RETURN: return false; // Currently the only special nodes that we hit // that we know that we don't want to CSE default: break; // Any new nodes that we might add later... } return false; } #ifdef DEBUG // // A Debug only method that allows you to control whether the CSE logic is enabled for this method. // // If this method returns false then the CSE phase should be performed. // If the method returns true then the CSE phase should be skipped. // bool Compiler::optConfigDisableCSE() { // Next check if COMPlus_JitNoCSE is set and applies to this method // unsigned jitNoCSE = JitConfig.JitNoCSE(); if (jitNoCSE > 0) { unsigned methodCount = Compiler::jitTotalMethodCompiled; if ((jitNoCSE & 0xF000000) == 0xF000000) { unsigned methodCountMask = methodCount & 0xFFF; unsigned bitsZero = (jitNoCSE >> 12) & 0xFFF; unsigned bitsOne = (jitNoCSE >> 0) & 0xFFF; if (((methodCountMask & bitsOne) == bitsOne) && ((~methodCountMask & bitsZero) == bitsZero)) { if (verbose) { printf(" Disabled by JitNoCSE methodCountMask\n"); } return true; // The CSE phase for this method is disabled } } else if (jitNoCSE <= (methodCount + 1)) { if (verbose) { printf(" Disabled by JitNoCSE > methodCount\n"); } return true; // The CSE phase for this method is disabled } } return false; } // // A Debug only method that allows you to control whether the CSE logic is enabled for // a particular CSE in a method // // If this method returns false then the CSE should be performed. // If the method returns true then the CSE should be skipped. // bool Compiler::optConfigDisableCSE2() { static unsigned totalCSEcount = 0; unsigned jitNoCSE2 = JitConfig.JitNoCSE2(); totalCSEcount++; if (jitNoCSE2 > 0) { if ((jitNoCSE2 & 0xF000000) == 0xF000000) { unsigned totalCSEMask = totalCSEcount & 0xFFF; unsigned bitsZero = (jitNoCSE2 >> 12) & 0xFFF; unsigned bitsOne = (jitNoCSE2 >> 0) & 0xFFF; if (((totalCSEMask & bitsOne) == bitsOne) && ((~totalCSEMask & bitsZero) == bitsZero)) { if (verbose) { printf(" Disabled by jitNoCSE2 Ones/Zeros mask\n"); } return true; } } else if ((jitNoCSE2 & 0xF000000) == 0xE000000) { unsigned totalCSEMask = totalCSEcount & 0xFFF; unsigned disableMask = jitNoCSE2 & 0xFFF; disableMask >>= (totalCSEMask % 12); if (disableMask & 1) { if (verbose) { printf(" Disabled by jitNoCSE2 rotating disable mask\n"); } return true; } } else if (jitNoCSE2 <= totalCSEcount) { if (verbose) { printf(" Disabled by jitNoCSE2 > totalCSEcount\n"); } return true; } } return false; } #endif void Compiler::optOptimizeCSEs() { if (optCSEstart != BAD_VAR_NUM) { // CSE being run multiple times so we may need to clean up old // information. optCleanupCSEs(); } optCSECandidateCount = 0; optCSEstart = lvaCount; INDEBUG(optEnsureClearCSEInfo()); optOptimizeValnumCSEs(); } /***************************************************************************** * * Cleanup after CSE to allow us to run more than once. */ void Compiler::optCleanupCSEs() { // We must clear the BBF_VISITED and BBF_MARKED flags. for (BasicBlock* const block : Blocks()) { // And clear all the "visited" bits on the block. block->bbFlags &= ~(BBF_VISITED | BBF_MARKED); // Walk the statement trees in this basic block. for (Statement* const stmt : block->NonPhiStatements()) { // We must clear the gtCSEnum field. 
for (GenTree* tree = stmt->GetRootNode(); tree; tree = tree->gtPrev) { tree->gtCSEnum = NO_CSE; } } } } #ifdef DEBUG /***************************************************************************** * * Ensure that all the CSE information in the IR is initialized the way we expect it, * before running a CSE phase. This is basically an assert that optCleanupCSEs() is not needed. */ void Compiler::optEnsureClearCSEInfo() { for (BasicBlock* const block : Blocks()) { assert((block->bbFlags & (BBF_VISITED | BBF_MARKED)) == 0); for (Statement* const stmt : block->NonPhiStatements()) { for (GenTree* tree = stmt->GetRootNode(); tree; tree = tree->gtPrev) { assert(tree->gtCSEnum == NO_CSE); } } } } //------------------------------------------------------------------------ // optPrintCSEDataFlowSet: Print out one of the CSE dataflow sets bbCseGen, bbCseIn, bbCseOut, // interpreting the bits in a more useful way for the dump. // // Arguments: // cseDataFlowSet - One of the dataflow sets to display // includeBits - Display the actual bits of the set as well // void Compiler::optPrintCSEDataFlowSet(EXPSET_VALARG_TP cseDataFlowSet, bool includeBits /* = true */) { if (includeBits) { printf("%s ", genES2str(cseLivenessTraits, cseDataFlowSet)); } bool first = true; for (unsigned cseIndex = 1; cseIndex <= optCSECandidateCount; cseIndex++) { unsigned cseAvailBit = getCSEAvailBit(cseIndex); unsigned cseAvailCrossCallBit = getCSEAvailCrossCallBit(cseIndex); if (BitVecOps::IsMember(cseLivenessTraits, cseDataFlowSet, cseAvailBit)) { if (!first) { printf(", "); } const bool isAvailCrossCall = BitVecOps::IsMember(cseLivenessTraits, cseDataFlowSet, cseAvailCrossCallBit); printf(FMT_CSE "%s", cseIndex, isAvailCrossCall ? ".c" : ""); first = false; } } } #endif // DEBUG
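A minimal standalone sketch of the shared-constant CSE idea that PerformCSE above implements: keep one base constant in the CSE temp and rebuild nearby constants as base plus a small delta. This is illustration only, not JIT code; the constant values, the starting choice of base, and the -255 threshold are assumptions that merely mirror the ARM addressing-mode comment above.

// Sketch only: shared-constant CSE expressed as base + delta (not part of the JIT sources).
#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
    // Hypothetical constants that all land close together.
    std::vector<int64_t> constants = {0x1010, 0x1000, 0x1008, 0x10F8};

    // Start from the first value, then prefer a smaller base while the
    // difference stays within the ARM-friendly range (no lower than -255).
    int64_t base = constants[0];
    for (int64_t c : constants)
    {
        int64_t diff = c - base;
        if ((diff < 0) && (diff >= -255))
        {
            base = c;
        }
    }

    // Every occurrence is then re-derived as base + delta instead of a
    // freshly materialized constant.
    for (int64_t c : constants)
    {
        printf("0x%llx = base + %lld\n", (unsigned long long)c, (long long)(c - base));
    }
    return 0;
}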
-1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
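As a hedged illustration of the loop shape this description is about (names and numbers are made up, written as C++-style pseudo-source for a managed loop): before the change, loop cloning required the induction variable to start from a constant or a simple local; with the change any init expression is accepted, and a check of that init against zero is added to the cloning conditions only when it is needed.

// Sketch only: induction variable initialized from an arbitrary expression.
int Sum(const int* data, int dataLen, int start, int bias)
{
    int sum = 0;
    // "i = start + bias" is neither a constant nor a lone local; under the
    // described change such an init no longer blocks cloning of the loop.
    for (int i = start + bias; i < dataLen; i++)
    {
        sum += data[i];
    }
    return sum;
}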
./src/mono/mono/component/debugger-protocol.h
#ifndef __MONO_DEBUGGER_PROTOCOL_H__ #define __MONO_DEBUGGER_PROTOCOL_H__ #include <stdint.h> #define HEADER_LENGTH 11 #define REPLY_PACKET 0x80 /* * Wire Protocol definitions */ #define MAJOR_VERSION 2 #define MINOR_VERSION 60 typedef enum { MDBGPROT_CMD_COMPOSITE = 100 } MdbgProtCmdComposite; typedef enum { MDBGPROT_CMD_VM_VERSION = 1, MDBGPROT_CMD_VM_ALL_THREADS = 2, MDBGPROT_CMD_VM_SUSPEND = 3, MDBGPROT_CMD_VM_RESUME = 4, MDBGPROT_CMD_VM_EXIT = 5, MDBGPROT_CMD_VM_DISPOSE = 6, MDBGPROT_CMD_VM_INVOKE_METHOD = 7, MDBGPROT_CMD_VM_SET_PROTOCOL_VERSION = 8, MDBGPROT_CMD_VM_ABORT_INVOKE = 9, MDBGPROT_CMD_VM_SET_KEEPALIVE = 10, MDBGPROT_CMD_VM_GET_TYPES_FOR_SOURCE_FILE = 11, MDBGPROT_CMD_VM_GET_TYPES = 12, MDBGPROT_CMD_VM_INVOKE_METHODS = 13, MDBGPROT_CMD_VM_START_BUFFERING = 14, MDBGPROT_CMD_VM_STOP_BUFFERING = 15, MDBGPROT_CMD_VM_READ_MEMORY = 16, MDBGPROT_CMD_VM_WRITE_MEMORY = 17, MDBGPROT_CMD_GET_ASSEMBLY_BY_NAME = 18, MDBGPROT_CMD_GET_MODULE_BY_GUID = 19 } MdbgProtCmdVM; typedef enum { MDBGPROT_CMD_SET_VM = 1, MDBGPROT_CMD_SET_OBJECT_REF = 9, MDBGPROT_CMD_SET_STRING_REF = 10, MDBGPROT_CMD_SET_THREAD = 11, MDBGPROT_CMD_SET_ARRAY_REF = 13, MDBGPROT_CMD_SET_EVENT_REQUEST = 15, MDBGPROT_CMD_SET_STACK_FRAME = 16, MDBGPROT_CMD_SET_APPDOMAIN = 20, MDBGPROT_CMD_SET_ASSEMBLY = 21, MDBGPROT_CMD_SET_METHOD = 22, MDBGPROT_CMD_SET_TYPE = 23, MDBGPROT_CMD_SET_MODULE = 24, MDBGPROT_CMD_SET_FIELD = 25, MDBGPROT_CMD_SET_EVENT = 64, MDBGPROT_CMD_SET_POINTER = 65 } MdbgProtCommandSet; typedef enum { MDBGPROT_ERR_NONE = 0, MDBGPROT_ERR_INVALID_OBJECT = 20, MDBGPROT_ERR_INVALID_FIELDID = 25, MDBGPROT_ERR_INVALID_FRAMEID = 30, MDBGPROT_ERR_NOT_IMPLEMENTED = 100, MDBGPROT_ERR_NOT_SUSPENDED = 101, MDBGPROT_ERR_INVALID_ARGUMENT = 102, MDBGPROT_ERR_UNLOADED = 103, MDBGPROT_ERR_NO_INVOCATION = 104, MDBGPROT_ERR_ABSENT_INFORMATION = 105, MDBGPROT_ERR_NO_SEQ_POINT_AT_IL_OFFSET = 106, MDBGPROT_ERR_INVOKE_ABORTED = 107, MDBGPROT_ERR_LOADER_ERROR = 200, /*XXX extend the protocol to pass this information down the pipe */ } MdbgProtErrorCode; typedef enum { MDBGPROT_TOKEN_TYPE_STRING = 0, MDBGPROT_TOKEN_TYPE_TYPE = 1, MDBGPROT_TOKEN_TYPE_FIELD = 2, MDBGPROT_TOKEN_TYPE_METHOD = 3, MDBGPROT_TOKEN_TYPE_UNKNOWN = 4 } MdbgProtDebuggerTokenType; typedef enum { MDBGPROT_VALUE_TYPE_ID_NULL = 0xf0, MDBGPROT_VALUE_TYPE_ID_TYPE = 0xf1, MDBGPROT_VALUE_TYPE_ID_PARENT_VTYPE = 0xf2, MDBGPROT_VALUE_TYPE_ID_FIXED_ARRAY = 0xf3 } MdbgProtValueTypeId; typedef enum { MDBGPROT_FRAME_FLAG_DEBUGGER_INVOKE = 1, MDBGPROT_FRAME_FLAG_NATIVE_TRANSITION = 2 } MdbgProtStackFrameFlags; typedef enum { MDBGPROT_INVOKE_FLAG_DISABLE_BREAKPOINTS = 1, MDBGPROT_INVOKE_FLAG_SINGLE_THREADED = 2, MDBGPROT_INVOKE_FLAG_RETURN_OUT_THIS = 4, MDBGPROT_INVOKE_FLAG_RETURN_OUT_ARGS = 8, MDBGPROT_INVOKE_FLAG_VIRTUAL = 16 } MdbgProtInvokeFlags; typedef enum { BINDING_FLAGS_IGNORE_CASE = 0x70000000, } MdbgProtBindingFlagsExtensions; typedef enum { MDBGPROT_CMD_THREAD_GET_FRAME_INFO = 1, MDBGPROT_CMD_THREAD_GET_NAME = 2, MDBGPROT_CMD_THREAD_GET_STATE = 3, MDBGPROT_CMD_THREAD_GET_INFO = 4, MDBGPROT_CMD_THREAD_GET_ID = 5, MDBGPROT_CMD_THREAD_GET_TID = 6, MDBGPROT_CMD_THREAD_SET_IP = 7, MDBGPROT_CMD_THREAD_ELAPSED_TIME = 8, MDBGPROT_CMD_THREAD_GET_APPDOMAIN = 9, MDBGPROT_CMD_THREAD_GET_CONTEXT = 10, MDBGPROT_CMD_THREAD_SET_CONTEXT = 11 } MdbgProtCmdThread; typedef enum { MDBGPROT_CMD_APPDOMAIN_GET_ROOT_DOMAIN = 1, MDBGPROT_CMD_APPDOMAIN_GET_FRIENDLY_NAME = 2, MDBGPROT_CMD_APPDOMAIN_GET_ASSEMBLIES = 3, MDBGPROT_CMD_APPDOMAIN_GET_ENTRY_ASSEMBLY = 4, 
MDBGPROT_CMD_APPDOMAIN_CREATE_STRING = 5, MDBGPROT_CMD_APPDOMAIN_GET_CORLIB = 6, MDBGPROT_CMD_APPDOMAIN_CREATE_BOXED_VALUE = 7, MDBGPROT_CMD_APPDOMAIN_CREATE_BYTE_ARRAY = 8, } MdbgProtCmdAppDomain; typedef enum { MDBGPROT_CMD_ASSEMBLY_GET_LOCATION = 1, MDBGPROT_CMD_ASSEMBLY_GET_ENTRY_POINT = 2, MDBGPROT_CMD_ASSEMBLY_GET_MANIFEST_MODULE = 3, MDBGPROT_CMD_ASSEMBLY_GET_OBJECT = 4, MDBGPROT_CMD_ASSEMBLY_GET_TYPE = 5, MDBGPROT_CMD_ASSEMBLY_GET_NAME = 6, MDBGPROT_CMD_ASSEMBLY_GET_DOMAIN = 7, MDBGPROT_CMD_ASSEMBLY_GET_METADATA_BLOB = 8, MDBGPROT_CMD_ASSEMBLY_GET_IS_DYNAMIC = 9, MDBGPROT_CMD_ASSEMBLY_GET_PDB_BLOB = 10, MDBGPROT_CMD_ASSEMBLY_GET_TYPE_FROM_TOKEN = 11, MDBGPROT_CMD_ASSEMBLY_GET_METHOD_FROM_TOKEN = 12, MDBGPROT_CMD_ASSEMBLY_HAS_DEBUG_INFO = 13, MDBGPROT_CMD_ASSEMBLY_GET_CATTRS = 14, MDBGPROT_CMD_ASSEMBLY_GET_CUSTOM_ATTRIBUTES = 15, MDBGPROT_CMD_ASSEMBLY_GET_PEIMAGE_ADDRESS = 16, } MdbgProtCmdAssembly; typedef enum { MDBGPROT_CMD_MODULE_GET_INFO = 1, MDBGPROT_CMD_MODULE_APPLY_CHANGES = 2 } MdbgProtCmdModule; typedef enum { MDBGPROT_CMD_FIELD_GET_INFO = 1, } MdbgProtCmdField; typedef enum { MDBGPROT_CMD_PROPERTY_GET_INFO = 1, } MdbgProtCmdProperty; typedef enum { MDBGPROT_CMD_METHOD_GET_NAME = 1, MDBGPROT_CMD_METHOD_GET_DECLARING_TYPE = 2, MDBGPROT_CMD_METHOD_GET_DEBUG_INFO = 3, MDBGPROT_CMD_METHOD_GET_PARAM_INFO = 4, MDBGPROT_CMD_METHOD_GET_LOCALS_INFO = 5, MDBGPROT_CMD_METHOD_GET_INFO = 6, MDBGPROT_CMD_METHOD_GET_BODY = 7, MDBGPROT_CMD_METHOD_RESOLVE_TOKEN = 8, MDBGPROT_CMD_METHOD_GET_CATTRS = 9, MDBGPROT_CMD_METHOD_MAKE_GENERIC_METHOD = 10, MDBGPROT_CMD_METHOD_TOKEN = 11, MDBGPROT_CMD_METHOD_ASSEMBLY = 12, MDBGPROT_CMD_METHOD_GET_CLASS_TOKEN = 13, MDBGPROT_CMD_METHOD_HAS_ASYNC_DEBUG_INFO = 14, MDBGPROT_CMD_METHOD_GET_NAME_FULL = 15 } MdbgProtCmdMethod; typedef enum { MDBGPROT_CMD_TYPE_GET_INFO = 1, MDBGPROT_CMD_TYPE_GET_METHODS = 2, MDBGPROT_CMD_TYPE_GET_FIELDS = 3, MDBGPROT_CMD_TYPE_GET_VALUES = 4, MDBGPROT_CMD_TYPE_GET_OBJECT = 5, MDBGPROT_CMD_TYPE_GET_SOURCE_FILES = 6, MDBGPROT_CMD_TYPE_SET_VALUES = 7, MDBGPROT_CMD_TYPE_IS_ASSIGNABLE_FROM = 8, MDBGPROT_CMD_TYPE_GET_PROPERTIES = 9, MDBGPROT_CMD_TYPE_GET_CATTRS = 10, MDBGPROT_CMD_TYPE_GET_FIELD_CATTRS = 11, MDBGPROT_CMD_TYPE_GET_PROPERTY_CATTRS = 12, MDBGPROT_CMD_TYPE_GET_SOURCE_FILES_2 = 13, MDBGPROT_CMD_TYPE_GET_VALUES_2 = 14, MDBGPROT_CMD_TYPE_GET_METHODS_BY_NAME_FLAGS = 15, MDBGPROT_CMD_TYPE_GET_INTERFACES = 16, MDBGPROT_CMD_TYPE_GET_INTERFACE_MAP = 17, MDBGPROT_CMD_TYPE_IS_INITIALIZED = 18, MDBGPROT_CMD_TYPE_CREATE_INSTANCE = 19, MDBGPROT_CMD_TYPE_GET_VALUE_SIZE = 20, MDBGPROT_CMD_TYPE_GET_VALUES_ICORDBG = 21, MDBGPROT_CMD_TYPE_GET_PARENTS = 22, MDBGPROT_CMD_TYPE_INITIALIZE = 23 } MdbgProtCmdType; typedef enum { MDBGPROT_CMD_STACK_FRAME_GET_VALUES = 1, MDBGPROT_CMD_STACK_FRAME_GET_THIS = 2, MDBGPROT_CMD_STACK_FRAME_SET_VALUES = 3, MDBGPROT_CMD_STACK_FRAME_GET_DOMAIN = 4, MDBGPROT_CMD_STACK_FRAME_SET_THIS = 5, MDBGPROT_CMD_STACK_FRAME_GET_ARGUMENT = 6, MDBGPROT_CMD_STACK_FRAME_GET_ARGUMENTS = 7 } MdbgProtCmdStackFrame; typedef enum { MDBGPROT_CMD_ARRAY_REF_GET_LENGTH = 1, MDBGPROT_CMD_ARRAY_REF_GET_VALUES = 2, MDBGPROT_CMD_ARRAY_REF_SET_VALUES = 3, MDBGPROT_CMD_ARRAY_REF_GET_TYPE = 4 } MdbgProtCmdArray; typedef enum { MDBGPROT_CMD_STRING_REF_GET_VALUE = 1, MDBGPROT_CMD_STRING_REF_GET_LENGTH = 2, MDBGPROT_CMD_STRING_REF_GET_CHARS = 3 } MdbgProtCmdString; typedef enum { MDBGPROT_CMD_POINTER_GET_VALUE = 1 } MdbgProtCmdPointer; typedef enum { MDBGPROT_CMD_OBJECT_REF_GET_TYPE = 1, MDBGPROT_CMD_OBJECT_REF_GET_VALUES = 2, 
MDBGPROT_CMD_OBJECT_REF_IS_COLLECTED = 3, MDBGPROT_CMD_OBJECT_REF_GET_ADDRESS = 4, MDBGPROT_CMD_OBJECT_REF_GET_DOMAIN = 5, MDBGPROT_CMD_OBJECT_REF_SET_VALUES = 6, MDBGPROT_CMD_OBJECT_REF_GET_INFO = 7, MDBGPROT_CMD_OBJECT_REF_GET_VALUES_ICORDBG = 8, MDBGPROT_CMD_OBJECT_REF_DELEGATE_GET_METHOD = 9, MDBGPROT_CMD_OBJECT_IS_DELEGATE = 10 } MdbgProtCmdObject; typedef enum { MDBGPROT_SUSPEND_POLICY_NONE = 0, MDBGPROT_SUSPEND_POLICY_EVENT_THREAD = 1, MDBGPROT_SUSPEND_POLICY_ALL = 2 } MdbgProtSuspendPolicy; typedef enum { MDBGPROT_CMD_EVENT_REQUEST_SET = 1, MDBGPROT_CMD_EVENT_REQUEST_CLEAR = 2, MDBGPROT_CMD_EVENT_REQUEST_CLEAR_ALL_BREAKPOINTS = 3 } MdbgProtCmdEvent; typedef struct { uint8_t *buf, *p, *end; } MdbgProtBuffer; typedef struct { int len; int id; int flags; int command_set; int command; int error; int error_2; } MdbgProtHeader; typedef struct ReplyPacket { int id; int error; MdbgProtBuffer *data; } MdbgProtReplyPacket; typedef enum { MDBGPROT_EVENT_KIND_VM_START = 0, MDBGPROT_EVENT_KIND_VM_DEATH = 1, MDBGPROT_EVENT_KIND_THREAD_START = 2, MDBGPROT_EVENT_KIND_THREAD_DEATH = 3, MDBGPROT_EVENT_KIND_APPDOMAIN_CREATE = 4, MDBGPROT_EVENT_KIND_APPDOMAIN_UNLOAD = 5, MDBGPROT_EVENT_KIND_METHOD_ENTRY = 6, MDBGPROT_EVENT_KIND_METHOD_EXIT = 7, MDBGPROT_EVENT_KIND_ASSEMBLY_LOAD = 8, MDBGPROT_EVENT_KIND_ASSEMBLY_UNLOAD = 9, MDBGPROT_EVENT_KIND_BREAKPOINT = 10, MDBGPROT_EVENT_KIND_STEP = 11, MDBGPROT_EVENT_KIND_TYPE_LOAD = 12, MDBGPROT_EVENT_KIND_EXCEPTION = 13, MDBGPROT_EVENT_KIND_KEEPALIVE = 14, MDBGPROT_EVENT_KIND_USER_BREAK = 15, MDBGPROT_EVENT_KIND_USER_LOG = 16, MDBGPROT_EVENT_KIND_CRASH = 17, MDBGPROT_EVENT_KIND_ENC_UPDATE = 18, MDBGPROT_EVENT_KIND_METHOD_UPDATE = 19, } MdbgProtEventKind; typedef enum { MDBGPROT_MOD_KIND_COUNT = 1, MDBGPROT_MOD_KIND_THREAD_ONLY = 3, MDBGPROT_MOD_KIND_LOCATION_ONLY = 7, MDBGPROT_MOD_KIND_EXCEPTION_ONLY = 8, MDBGPROT_MOD_KIND_STEP = 10, MDBGPROT_MOD_KIND_ASSEMBLY_ONLY = 11, MDBGPROT_MOD_KIND_SOURCE_FILE_ONLY = 12, MDBGPROT_MOD_KIND_TYPE_NAME_ONLY = 13, MDBGPROT_MOD_KIND_NONE = 14 } MdbgProtModifierKind; typedef enum { MDBGPROT_STEP_DEPTH_INTO = 0, MDBGPROT_STEP_DEPTH_OVER = 1, MDBGPROT_STEP_DEPTH_OUT = 2 } MdbgProtStepDepth; typedef enum { MDBGPROT_STEP_SIZE_MIN = 0, MDBGPROT_STEP_SIZE_LINE = 1 } MdbgProtStepSize; typedef enum { MDBGPROT_STEP_FILTER_NONE = 0, MDBGPROT_STEP_FILTER_STATIC_CTOR = 1, MDBGPROT_STEP_FILTER_DEBUGGER_HIDDEN = 2, MDBGPROT_STEP_FILTER_DEBUGGER_STEP_THROUGH = 4, MDBGPROT_STEP_FILTER_DEBUGGER_NON_USER_CODE = 8 } MdbgProtStepFilter; /* * IDS */ typedef enum { ID_ASSEMBLY = 0, ID_MODULE = 1, ID_TYPE = 2, ID_METHOD = 3, ID_FIELD = 4, ID_DOMAIN = 5, ID_PROPERTY = 6, ID_PARAMETER = 7, ID_NUM } IdType; int m_dbgprot_buffer_add_command_header (MdbgProtBuffer *recvbuf, int cmd_set, int cmd, MdbgProtBuffer *out); void m_dbgprot_decode_command_header (MdbgProtBuffer *recvbuf, MdbgProtHeader *header); /* * Functions to decode protocol data */ int m_dbgprot_decode_byte (uint8_t *buf, uint8_t **endbuf, uint8_t *limit); int m_dbgprot_decode_int (uint8_t *buf, uint8_t **endbuf, uint8_t *limit); int64_t m_dbgprot_decode_long (uint8_t *buf, uint8_t **endbuf, uint8_t *limit); int m_dbgprot_decode_id (uint8_t *buf, uint8_t **endbuf, uint8_t *limit); char* m_dbgprot_decode_string (uint8_t *buf, uint8_t **endbuf, uint8_t *limit); char* m_dbgprot_decode_string_with_len(uint8_t* buf, uint8_t** endbuf, uint8_t* limit, int *len); uint8_t* m_dbgprot_decode_byte_array(uint8_t *buf, uint8_t **endbuf, uint8_t *limit, int32_t *len); /* * Functions to encode protocol data 
*/ void m_dbgprot_buffer_init (MdbgProtBuffer *buf, uint32_t size); uint32_t m_dbgprot_buffer_len (MdbgProtBuffer *buf); void m_dbgprot_buffer_make_room (MdbgProtBuffer *buf, uint32_t size); void m_dbgprot_buffer_add_byte (MdbgProtBuffer *buf, uint8_t val); void m_dbgprot_buffer_add_short (MdbgProtBuffer *buf, uint32_t val); void m_dbgprot_buffer_add_int (MdbgProtBuffer *buf, uint32_t val); void m_dbgprot_buffer_add_long (MdbgProtBuffer *buf, uint64_t l); void m_dbgprot_buffer_add_id (MdbgProtBuffer *buf, uint32_t id); void m_dbgprot_buffer_add_data (MdbgProtBuffer *buf, uint8_t *data, uint32_t len); void m_dbgprot_buffer_add_utf16 (MdbgProtBuffer *buf, uint8_t *data, uint32_t len); void m_dbgprot_buffer_add_string (MdbgProtBuffer *buf, const char *str); void m_dbgprot_buffer_add_byte_array (MdbgProtBuffer *buf, uint8_t *bytes, uint32_t arr_len); void m_dbgprot_buffer_add_buffer (MdbgProtBuffer *buf, MdbgProtBuffer *data); void m_dbgprot_buffer_free (MdbgProtBuffer *buf); const char* m_dbgprot_event_to_string (MdbgProtEventKind event); #endif
#ifndef __MONO_DEBUGGER_PROTOCOL_H__ #define __MONO_DEBUGGER_PROTOCOL_H__ #include <stdint.h> #define HEADER_LENGTH 11 #define REPLY_PACKET 0x80 /* * Wire Protocol definitions */ #define MAJOR_VERSION 2 #define MINOR_VERSION 60 typedef enum { MDBGPROT_CMD_COMPOSITE = 100 } MdbgProtCmdComposite; typedef enum { MDBGPROT_CMD_VM_VERSION = 1, MDBGPROT_CMD_VM_ALL_THREADS = 2, MDBGPROT_CMD_VM_SUSPEND = 3, MDBGPROT_CMD_VM_RESUME = 4, MDBGPROT_CMD_VM_EXIT = 5, MDBGPROT_CMD_VM_DISPOSE = 6, MDBGPROT_CMD_VM_INVOKE_METHOD = 7, MDBGPROT_CMD_VM_SET_PROTOCOL_VERSION = 8, MDBGPROT_CMD_VM_ABORT_INVOKE = 9, MDBGPROT_CMD_VM_SET_KEEPALIVE = 10, MDBGPROT_CMD_VM_GET_TYPES_FOR_SOURCE_FILE = 11, MDBGPROT_CMD_VM_GET_TYPES = 12, MDBGPROT_CMD_VM_INVOKE_METHODS = 13, MDBGPROT_CMD_VM_START_BUFFERING = 14, MDBGPROT_CMD_VM_STOP_BUFFERING = 15, MDBGPROT_CMD_VM_READ_MEMORY = 16, MDBGPROT_CMD_VM_WRITE_MEMORY = 17, MDBGPROT_CMD_GET_ASSEMBLY_BY_NAME = 18, MDBGPROT_CMD_GET_MODULE_BY_GUID = 19 } MdbgProtCmdVM; typedef enum { MDBGPROT_CMD_SET_VM = 1, MDBGPROT_CMD_SET_OBJECT_REF = 9, MDBGPROT_CMD_SET_STRING_REF = 10, MDBGPROT_CMD_SET_THREAD = 11, MDBGPROT_CMD_SET_ARRAY_REF = 13, MDBGPROT_CMD_SET_EVENT_REQUEST = 15, MDBGPROT_CMD_SET_STACK_FRAME = 16, MDBGPROT_CMD_SET_APPDOMAIN = 20, MDBGPROT_CMD_SET_ASSEMBLY = 21, MDBGPROT_CMD_SET_METHOD = 22, MDBGPROT_CMD_SET_TYPE = 23, MDBGPROT_CMD_SET_MODULE = 24, MDBGPROT_CMD_SET_FIELD = 25, MDBGPROT_CMD_SET_EVENT = 64, MDBGPROT_CMD_SET_POINTER = 65 } MdbgProtCommandSet; typedef enum { MDBGPROT_ERR_NONE = 0, MDBGPROT_ERR_INVALID_OBJECT = 20, MDBGPROT_ERR_INVALID_FIELDID = 25, MDBGPROT_ERR_INVALID_FRAMEID = 30, MDBGPROT_ERR_NOT_IMPLEMENTED = 100, MDBGPROT_ERR_NOT_SUSPENDED = 101, MDBGPROT_ERR_INVALID_ARGUMENT = 102, MDBGPROT_ERR_UNLOADED = 103, MDBGPROT_ERR_NO_INVOCATION = 104, MDBGPROT_ERR_ABSENT_INFORMATION = 105, MDBGPROT_ERR_NO_SEQ_POINT_AT_IL_OFFSET = 106, MDBGPROT_ERR_INVOKE_ABORTED = 107, MDBGPROT_ERR_LOADER_ERROR = 200, /*XXX extend the protocol to pass this information down the pipe */ } MdbgProtErrorCode; typedef enum { MDBGPROT_TOKEN_TYPE_STRING = 0, MDBGPROT_TOKEN_TYPE_TYPE = 1, MDBGPROT_TOKEN_TYPE_FIELD = 2, MDBGPROT_TOKEN_TYPE_METHOD = 3, MDBGPROT_TOKEN_TYPE_UNKNOWN = 4 } MdbgProtDebuggerTokenType; typedef enum { MDBGPROT_VALUE_TYPE_ID_NULL = 0xf0, MDBGPROT_VALUE_TYPE_ID_TYPE = 0xf1, MDBGPROT_VALUE_TYPE_ID_PARENT_VTYPE = 0xf2, MDBGPROT_VALUE_TYPE_ID_FIXED_ARRAY = 0xf3 } MdbgProtValueTypeId; typedef enum { MDBGPROT_FRAME_FLAG_DEBUGGER_INVOKE = 1, MDBGPROT_FRAME_FLAG_NATIVE_TRANSITION = 2 } MdbgProtStackFrameFlags; typedef enum { MDBGPROT_INVOKE_FLAG_DISABLE_BREAKPOINTS = 1, MDBGPROT_INVOKE_FLAG_SINGLE_THREADED = 2, MDBGPROT_INVOKE_FLAG_RETURN_OUT_THIS = 4, MDBGPROT_INVOKE_FLAG_RETURN_OUT_ARGS = 8, MDBGPROT_INVOKE_FLAG_VIRTUAL = 16 } MdbgProtInvokeFlags; typedef enum { BINDING_FLAGS_IGNORE_CASE = 0x70000000, } MdbgProtBindingFlagsExtensions; typedef enum { MDBGPROT_CMD_THREAD_GET_FRAME_INFO = 1, MDBGPROT_CMD_THREAD_GET_NAME = 2, MDBGPROT_CMD_THREAD_GET_STATE = 3, MDBGPROT_CMD_THREAD_GET_INFO = 4, MDBGPROT_CMD_THREAD_GET_ID = 5, MDBGPROT_CMD_THREAD_GET_TID = 6, MDBGPROT_CMD_THREAD_SET_IP = 7, MDBGPROT_CMD_THREAD_ELAPSED_TIME = 8, MDBGPROT_CMD_THREAD_GET_APPDOMAIN = 9, MDBGPROT_CMD_THREAD_GET_CONTEXT = 10, MDBGPROT_CMD_THREAD_SET_CONTEXT = 11 } MdbgProtCmdThread; typedef enum { MDBGPROT_CMD_APPDOMAIN_GET_ROOT_DOMAIN = 1, MDBGPROT_CMD_APPDOMAIN_GET_FRIENDLY_NAME = 2, MDBGPROT_CMD_APPDOMAIN_GET_ASSEMBLIES = 3, MDBGPROT_CMD_APPDOMAIN_GET_ENTRY_ASSEMBLY = 4, 
MDBGPROT_CMD_APPDOMAIN_CREATE_STRING = 5, MDBGPROT_CMD_APPDOMAIN_GET_CORLIB = 6, MDBGPROT_CMD_APPDOMAIN_CREATE_BOXED_VALUE = 7, MDBGPROT_CMD_APPDOMAIN_CREATE_BYTE_ARRAY = 8, } MdbgProtCmdAppDomain; typedef enum { MDBGPROT_CMD_ASSEMBLY_GET_LOCATION = 1, MDBGPROT_CMD_ASSEMBLY_GET_ENTRY_POINT = 2, MDBGPROT_CMD_ASSEMBLY_GET_MANIFEST_MODULE = 3, MDBGPROT_CMD_ASSEMBLY_GET_OBJECT = 4, MDBGPROT_CMD_ASSEMBLY_GET_TYPE = 5, MDBGPROT_CMD_ASSEMBLY_GET_NAME = 6, MDBGPROT_CMD_ASSEMBLY_GET_DOMAIN = 7, MDBGPROT_CMD_ASSEMBLY_GET_METADATA_BLOB = 8, MDBGPROT_CMD_ASSEMBLY_GET_IS_DYNAMIC = 9, MDBGPROT_CMD_ASSEMBLY_GET_PDB_BLOB = 10, MDBGPROT_CMD_ASSEMBLY_GET_TYPE_FROM_TOKEN = 11, MDBGPROT_CMD_ASSEMBLY_GET_METHOD_FROM_TOKEN = 12, MDBGPROT_CMD_ASSEMBLY_HAS_DEBUG_INFO = 13, MDBGPROT_CMD_ASSEMBLY_GET_CATTRS = 14, MDBGPROT_CMD_ASSEMBLY_GET_CUSTOM_ATTRIBUTES = 15, MDBGPROT_CMD_ASSEMBLY_GET_PEIMAGE_ADDRESS = 16, } MdbgProtCmdAssembly; typedef enum { MDBGPROT_CMD_MODULE_GET_INFO = 1, MDBGPROT_CMD_MODULE_APPLY_CHANGES = 2 } MdbgProtCmdModule; typedef enum { MDBGPROT_CMD_FIELD_GET_INFO = 1, } MdbgProtCmdField; typedef enum { MDBGPROT_CMD_PROPERTY_GET_INFO = 1, } MdbgProtCmdProperty; typedef enum { MDBGPROT_CMD_METHOD_GET_NAME = 1, MDBGPROT_CMD_METHOD_GET_DECLARING_TYPE = 2, MDBGPROT_CMD_METHOD_GET_DEBUG_INFO = 3, MDBGPROT_CMD_METHOD_GET_PARAM_INFO = 4, MDBGPROT_CMD_METHOD_GET_LOCALS_INFO = 5, MDBGPROT_CMD_METHOD_GET_INFO = 6, MDBGPROT_CMD_METHOD_GET_BODY = 7, MDBGPROT_CMD_METHOD_RESOLVE_TOKEN = 8, MDBGPROT_CMD_METHOD_GET_CATTRS = 9, MDBGPROT_CMD_METHOD_MAKE_GENERIC_METHOD = 10, MDBGPROT_CMD_METHOD_TOKEN = 11, MDBGPROT_CMD_METHOD_ASSEMBLY = 12, MDBGPROT_CMD_METHOD_GET_CLASS_TOKEN = 13, MDBGPROT_CMD_METHOD_HAS_ASYNC_DEBUG_INFO = 14, MDBGPROT_CMD_METHOD_GET_NAME_FULL = 15 } MdbgProtCmdMethod; typedef enum { MDBGPROT_CMD_TYPE_GET_INFO = 1, MDBGPROT_CMD_TYPE_GET_METHODS = 2, MDBGPROT_CMD_TYPE_GET_FIELDS = 3, MDBGPROT_CMD_TYPE_GET_VALUES = 4, MDBGPROT_CMD_TYPE_GET_OBJECT = 5, MDBGPROT_CMD_TYPE_GET_SOURCE_FILES = 6, MDBGPROT_CMD_TYPE_SET_VALUES = 7, MDBGPROT_CMD_TYPE_IS_ASSIGNABLE_FROM = 8, MDBGPROT_CMD_TYPE_GET_PROPERTIES = 9, MDBGPROT_CMD_TYPE_GET_CATTRS = 10, MDBGPROT_CMD_TYPE_GET_FIELD_CATTRS = 11, MDBGPROT_CMD_TYPE_GET_PROPERTY_CATTRS = 12, MDBGPROT_CMD_TYPE_GET_SOURCE_FILES_2 = 13, MDBGPROT_CMD_TYPE_GET_VALUES_2 = 14, MDBGPROT_CMD_TYPE_GET_METHODS_BY_NAME_FLAGS = 15, MDBGPROT_CMD_TYPE_GET_INTERFACES = 16, MDBGPROT_CMD_TYPE_GET_INTERFACE_MAP = 17, MDBGPROT_CMD_TYPE_IS_INITIALIZED = 18, MDBGPROT_CMD_TYPE_CREATE_INSTANCE = 19, MDBGPROT_CMD_TYPE_GET_VALUE_SIZE = 20, MDBGPROT_CMD_TYPE_GET_VALUES_ICORDBG = 21, MDBGPROT_CMD_TYPE_GET_PARENTS = 22, MDBGPROT_CMD_TYPE_INITIALIZE = 23 } MdbgProtCmdType; typedef enum { MDBGPROT_CMD_STACK_FRAME_GET_VALUES = 1, MDBGPROT_CMD_STACK_FRAME_GET_THIS = 2, MDBGPROT_CMD_STACK_FRAME_SET_VALUES = 3, MDBGPROT_CMD_STACK_FRAME_GET_DOMAIN = 4, MDBGPROT_CMD_STACK_FRAME_SET_THIS = 5, MDBGPROT_CMD_STACK_FRAME_GET_ARGUMENT = 6, MDBGPROT_CMD_STACK_FRAME_GET_ARGUMENTS = 7 } MdbgProtCmdStackFrame; typedef enum { MDBGPROT_CMD_ARRAY_REF_GET_LENGTH = 1, MDBGPROT_CMD_ARRAY_REF_GET_VALUES = 2, MDBGPROT_CMD_ARRAY_REF_SET_VALUES = 3, MDBGPROT_CMD_ARRAY_REF_GET_TYPE = 4 } MdbgProtCmdArray; typedef enum { MDBGPROT_CMD_STRING_REF_GET_VALUE = 1, MDBGPROT_CMD_STRING_REF_GET_LENGTH = 2, MDBGPROT_CMD_STRING_REF_GET_CHARS = 3 } MdbgProtCmdString; typedef enum { MDBGPROT_CMD_POINTER_GET_VALUE = 1 } MdbgProtCmdPointer; typedef enum { MDBGPROT_CMD_OBJECT_REF_GET_TYPE = 1, MDBGPROT_CMD_OBJECT_REF_GET_VALUES = 2, 
MDBGPROT_CMD_OBJECT_REF_IS_COLLECTED = 3, MDBGPROT_CMD_OBJECT_REF_GET_ADDRESS = 4, MDBGPROT_CMD_OBJECT_REF_GET_DOMAIN = 5, MDBGPROT_CMD_OBJECT_REF_SET_VALUES = 6, MDBGPROT_CMD_OBJECT_REF_GET_INFO = 7, MDBGPROT_CMD_OBJECT_REF_GET_VALUES_ICORDBG = 8, MDBGPROT_CMD_OBJECT_REF_DELEGATE_GET_METHOD = 9, MDBGPROT_CMD_OBJECT_IS_DELEGATE = 10 } MdbgProtCmdObject; typedef enum { MDBGPROT_SUSPEND_POLICY_NONE = 0, MDBGPROT_SUSPEND_POLICY_EVENT_THREAD = 1, MDBGPROT_SUSPEND_POLICY_ALL = 2 } MdbgProtSuspendPolicy; typedef enum { MDBGPROT_CMD_EVENT_REQUEST_SET = 1, MDBGPROT_CMD_EVENT_REQUEST_CLEAR = 2, MDBGPROT_CMD_EVENT_REQUEST_CLEAR_ALL_BREAKPOINTS = 3 } MdbgProtCmdEvent; typedef struct { uint8_t *buf, *p, *end; } MdbgProtBuffer; typedef struct { int len; int id; int flags; int command_set; int command; int error; int error_2; } MdbgProtHeader; typedef struct ReplyPacket { int id; int error; MdbgProtBuffer *data; } MdbgProtReplyPacket; typedef enum { MDBGPROT_EVENT_KIND_VM_START = 0, MDBGPROT_EVENT_KIND_VM_DEATH = 1, MDBGPROT_EVENT_KIND_THREAD_START = 2, MDBGPROT_EVENT_KIND_THREAD_DEATH = 3, MDBGPROT_EVENT_KIND_APPDOMAIN_CREATE = 4, MDBGPROT_EVENT_KIND_APPDOMAIN_UNLOAD = 5, MDBGPROT_EVENT_KIND_METHOD_ENTRY = 6, MDBGPROT_EVENT_KIND_METHOD_EXIT = 7, MDBGPROT_EVENT_KIND_ASSEMBLY_LOAD = 8, MDBGPROT_EVENT_KIND_ASSEMBLY_UNLOAD = 9, MDBGPROT_EVENT_KIND_BREAKPOINT = 10, MDBGPROT_EVENT_KIND_STEP = 11, MDBGPROT_EVENT_KIND_TYPE_LOAD = 12, MDBGPROT_EVENT_KIND_EXCEPTION = 13, MDBGPROT_EVENT_KIND_KEEPALIVE = 14, MDBGPROT_EVENT_KIND_USER_BREAK = 15, MDBGPROT_EVENT_KIND_USER_LOG = 16, MDBGPROT_EVENT_KIND_CRASH = 17, MDBGPROT_EVENT_KIND_ENC_UPDATE = 18, MDBGPROT_EVENT_KIND_METHOD_UPDATE = 19, } MdbgProtEventKind; typedef enum { MDBGPROT_MOD_KIND_COUNT = 1, MDBGPROT_MOD_KIND_THREAD_ONLY = 3, MDBGPROT_MOD_KIND_LOCATION_ONLY = 7, MDBGPROT_MOD_KIND_EXCEPTION_ONLY = 8, MDBGPROT_MOD_KIND_STEP = 10, MDBGPROT_MOD_KIND_ASSEMBLY_ONLY = 11, MDBGPROT_MOD_KIND_SOURCE_FILE_ONLY = 12, MDBGPROT_MOD_KIND_TYPE_NAME_ONLY = 13, MDBGPROT_MOD_KIND_NONE = 14 } MdbgProtModifierKind; typedef enum { MDBGPROT_STEP_DEPTH_INTO = 0, MDBGPROT_STEP_DEPTH_OVER = 1, MDBGPROT_STEP_DEPTH_OUT = 2 } MdbgProtStepDepth; typedef enum { MDBGPROT_STEP_SIZE_MIN = 0, MDBGPROT_STEP_SIZE_LINE = 1 } MdbgProtStepSize; typedef enum { MDBGPROT_STEP_FILTER_NONE = 0, MDBGPROT_STEP_FILTER_STATIC_CTOR = 1, MDBGPROT_STEP_FILTER_DEBUGGER_HIDDEN = 2, MDBGPROT_STEP_FILTER_DEBUGGER_STEP_THROUGH = 4, MDBGPROT_STEP_FILTER_DEBUGGER_NON_USER_CODE = 8 } MdbgProtStepFilter; /* * IDS */ typedef enum { ID_ASSEMBLY = 0, ID_MODULE = 1, ID_TYPE = 2, ID_METHOD = 3, ID_FIELD = 4, ID_DOMAIN = 5, ID_PROPERTY = 6, ID_PARAMETER = 7, ID_NUM } IdType; int m_dbgprot_buffer_add_command_header (MdbgProtBuffer *recvbuf, int cmd_set, int cmd, MdbgProtBuffer *out); void m_dbgprot_decode_command_header (MdbgProtBuffer *recvbuf, MdbgProtHeader *header); /* * Functions to decode protocol data */ int m_dbgprot_decode_byte (uint8_t *buf, uint8_t **endbuf, uint8_t *limit); int m_dbgprot_decode_int (uint8_t *buf, uint8_t **endbuf, uint8_t *limit); int64_t m_dbgprot_decode_long (uint8_t *buf, uint8_t **endbuf, uint8_t *limit); int m_dbgprot_decode_id (uint8_t *buf, uint8_t **endbuf, uint8_t *limit); char* m_dbgprot_decode_string (uint8_t *buf, uint8_t **endbuf, uint8_t *limit); char* m_dbgprot_decode_string_with_len(uint8_t* buf, uint8_t** endbuf, uint8_t* limit, int *len); uint8_t* m_dbgprot_decode_byte_array(uint8_t *buf, uint8_t **endbuf, uint8_t *limit, int32_t *len); /* * Functions to encode protocol data 
*/ void m_dbgprot_buffer_init (MdbgProtBuffer *buf, uint32_t size); uint32_t m_dbgprot_buffer_len (MdbgProtBuffer *buf); void m_dbgprot_buffer_make_room (MdbgProtBuffer *buf, uint32_t size); void m_dbgprot_buffer_add_byte (MdbgProtBuffer *buf, uint8_t val); void m_dbgprot_buffer_add_short (MdbgProtBuffer *buf, uint32_t val); void m_dbgprot_buffer_add_int (MdbgProtBuffer *buf, uint32_t val); void m_dbgprot_buffer_add_long (MdbgProtBuffer *buf, uint64_t l); void m_dbgprot_buffer_add_id (MdbgProtBuffer *buf, uint32_t id); void m_dbgprot_buffer_add_data (MdbgProtBuffer *buf, uint8_t *data, uint32_t len); void m_dbgprot_buffer_add_utf16 (MdbgProtBuffer *buf, uint8_t *data, uint32_t len); void m_dbgprot_buffer_add_string (MdbgProtBuffer *buf, const char *str); void m_dbgprot_buffer_add_byte_array (MdbgProtBuffer *buf, uint8_t *bytes, uint32_t arr_len); void m_dbgprot_buffer_add_buffer (MdbgProtBuffer *buf, MdbgProtBuffer *data); void m_dbgprot_buffer_free (MdbgProtBuffer *buf); const char* m_dbgprot_event_to_string (MdbgProtEventKind event); #endif
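A small usage sketch for the encode helpers declared in this header; every call below is declared above, but the ordering and the argument values are assumptions made for illustration rather than the protocol's actual packet layout.

/* Sketch only: exercising the buffer encode helpers declared above. */
#include "debugger-protocol.h"

static void sketch_encode_example (void)
{
	MdbgProtBuffer buf;

	m_dbgprot_buffer_init (&buf, 128);

	/* Encode a few primitive fields the way a command body might. */
	m_dbgprot_buffer_add_byte (&buf, MDBGPROT_CMD_VM_RESUME);
	m_dbgprot_buffer_add_int (&buf, 0 /* hypothetical request id */);
	m_dbgprot_buffer_add_string (&buf, "illustrative payload");

	/* m_dbgprot_buffer_len (&buf) bytes would be handed to the transport here. */

	m_dbgprot_buffer_free (&buf);
}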
-1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/coreclr/pal/inc/rt/ocidl.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // // =========================================================================== // File: ocidl.h // // =========================================================================== // dummy ocidl.h for PAL #include "palrt.h"
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // // =========================================================================== // File: ocidl.h // // =========================================================================== // dummy ocidl.h for PAL #include "palrt.h"
-1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/coreclr/nativeaot/Runtime/windows/PalRedhawkInline.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // Implementation of Redhawk PAL inline functions EXTERN_C long __cdecl _InterlockedIncrement(long volatile *); #pragma intrinsic(_InterlockedIncrement) FORCEINLINE int32_t PalInterlockedIncrement(_Inout_ int32_t volatile *pDst) { return _InterlockedIncrement((long volatile *)pDst); } EXTERN_C long __cdecl _InterlockedDecrement(long volatile *); #pragma intrinsic(_InterlockedDecrement) FORCEINLINE int32_t PalInterlockedDecrement(_Inout_ int32_t volatile *pDst) { return _InterlockedDecrement((long volatile *)pDst); } EXTERN_C long _InterlockedOr(long volatile *, long); #pragma intrinsic(_InterlockedOr) FORCEINLINE uint32_t PalInterlockedOr(_Inout_ uint32_t volatile *pDst, uint32_t iValue) { return _InterlockedOr((long volatile *)pDst, iValue); } EXTERN_C long _InterlockedAnd(long volatile *, long); #pragma intrinsic(_InterlockedAnd) FORCEINLINE uint32_t PalInterlockedAnd(_Inout_ uint32_t volatile *pDst, uint32_t iValue) { return _InterlockedAnd((long volatile *)pDst, iValue); } EXTERN_C long __PN__MACHINECALL_CDECL_OR_DEFAULT _InterlockedExchange(long volatile *, long); #pragma intrinsic(_InterlockedExchange) FORCEINLINE int32_t PalInterlockedExchange(_Inout_ int32_t volatile *pDst, int32_t iValue) { return _InterlockedExchange((long volatile *)pDst, iValue); } EXTERN_C long __PN__MACHINECALL_CDECL_OR_DEFAULT _InterlockedCompareExchange(long volatile *, long, long); #pragma intrinsic(_InterlockedCompareExchange) FORCEINLINE int32_t PalInterlockedCompareExchange(_Inout_ int32_t volatile *pDst, int32_t iValue, int32_t iComparand) { return _InterlockedCompareExchange((long volatile *)pDst, iValue, iComparand); } EXTERN_C int64_t _InterlockedCompareExchange64(int64_t volatile *, int64_t, int64_t); #pragma intrinsic(_InterlockedCompareExchange64) FORCEINLINE int64_t PalInterlockedCompareExchange64(_Inout_ int64_t volatile *pDst, int64_t iValue, int64_t iComparand) { return _InterlockedCompareExchange64(pDst, iValue, iComparand); } #if defined(HOST_AMD64) || defined(HOST_ARM64) EXTERN_C uint8_t _InterlockedCompareExchange128(int64_t volatile *, int64_t, int64_t, int64_t *); #pragma intrinsic(_InterlockedCompareExchange128) FORCEINLINE uint8_t PalInterlockedCompareExchange128(_Inout_ int64_t volatile *pDst, int64_t iValueHigh, int64_t iValueLow, int64_t *pComparandAndResult) { return _InterlockedCompareExchange128(pDst, iValueHigh, iValueLow, pComparandAndResult); } #endif // HOST_AMD64 #ifdef HOST_64BIT EXTERN_C void * _InterlockedExchangePointer(void * volatile *, void *); #pragma intrinsic(_InterlockedExchangePointer) FORCEINLINE void * PalInterlockedExchangePointer(_Inout_ void * volatile *pDst, _In_ void *pValue) { return _InterlockedExchangePointer((void * volatile *)pDst, pValue); } EXTERN_C void * _InterlockedCompareExchangePointer(void * volatile *, void *, void *); #pragma intrinsic(_InterlockedCompareExchangePointer) FORCEINLINE void * PalInterlockedCompareExchangePointer(_Inout_ void * volatile *pDst, _In_ void *pValue, _In_ void *pComparand) { return _InterlockedCompareExchangePointer((void * volatile *)pDst, pValue, pComparand); } #else // HOST_64BIT #define PalInterlockedExchangePointer(_pDst, _pValue) \ ((void *)_InterlockedExchange((long volatile *)(_pDst), (long)(size_t)(_pValue))) #define PalInterlockedCompareExchangePointer(_pDst, _pValue, _pComparand) \ ((void *)_InterlockedCompareExchange((long volatile *)(_pDst), (long)(size_t)(_pValue), 
(long)(size_t)(_pComparand))) #endif // HOST_64BIT EXTERN_C __declspec(dllimport) unsigned long __stdcall GetLastError(); FORCEINLINE int PalGetLastError() { return (int)GetLastError(); } EXTERN_C __declspec(dllimport) void __stdcall SetLastError(unsigned long error); FORCEINLINE void PalSetLastError(int error) { SetLastError((unsigned long)error); } #if defined(HOST_X86) EXTERN_C void _mm_pause(); #pragma intrinsic(_mm_pause) #define PalYieldProcessor() _mm_pause() FORCEINLINE void PalMemoryBarrier() { long Barrier; _InterlockedOr(&Barrier, 0); } #elif defined(HOST_AMD64) EXTERN_C void _mm_pause(); #pragma intrinsic(_mm_pause) #define PalYieldProcessor() _mm_pause() EXTERN_C void __faststorefence(); #pragma intrinsic(__faststorefence) #define PalMemoryBarrier() __faststorefence() #elif defined(HOST_ARM) EXTERN_C void __yield(void); #pragma intrinsic(__yield) EXTERN_C void __dmb(unsigned int _Type); #pragma intrinsic(__dmb) FORCEINLINE void PalYieldProcessor() { __dmb(0xA /* _ARM_BARRIER_ISHST */); __yield(); } #define PalMemoryBarrier() __dmb(0xF /* _ARM_BARRIER_SY */) #elif defined(HOST_ARM64) EXTERN_C void __yield(void); #pragma intrinsic(__yield) EXTERN_C void __dmb(unsigned int _Type); #pragma intrinsic(__dmb) FORCEINLINE void PalYieldProcessor() { __dmb(0xA /* _ARM64_BARRIER_ISHST */); __yield(); } #define PalMemoryBarrier() __dmb(0xF /* _ARM64_BARRIER_SY */) #else #error Unsupported architecture #endif #define PalDebugBreak() __debugbreak()
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // Implementation of Redhawk PAL inline functions EXTERN_C long __cdecl _InterlockedIncrement(long volatile *); #pragma intrinsic(_InterlockedIncrement) FORCEINLINE int32_t PalInterlockedIncrement(_Inout_ int32_t volatile *pDst) { return _InterlockedIncrement((long volatile *)pDst); } EXTERN_C long __cdecl _InterlockedDecrement(long volatile *); #pragma intrinsic(_InterlockedDecrement) FORCEINLINE int32_t PalInterlockedDecrement(_Inout_ int32_t volatile *pDst) { return _InterlockedDecrement((long volatile *)pDst); } EXTERN_C long _InterlockedOr(long volatile *, long); #pragma intrinsic(_InterlockedOr) FORCEINLINE uint32_t PalInterlockedOr(_Inout_ uint32_t volatile *pDst, uint32_t iValue) { return _InterlockedOr((long volatile *)pDst, iValue); } EXTERN_C long _InterlockedAnd(long volatile *, long); #pragma intrinsic(_InterlockedAnd) FORCEINLINE uint32_t PalInterlockedAnd(_Inout_ uint32_t volatile *pDst, uint32_t iValue) { return _InterlockedAnd((long volatile *)pDst, iValue); } EXTERN_C long __PN__MACHINECALL_CDECL_OR_DEFAULT _InterlockedExchange(long volatile *, long); #pragma intrinsic(_InterlockedExchange) FORCEINLINE int32_t PalInterlockedExchange(_Inout_ int32_t volatile *pDst, int32_t iValue) { return _InterlockedExchange((long volatile *)pDst, iValue); } EXTERN_C long __PN__MACHINECALL_CDECL_OR_DEFAULT _InterlockedCompareExchange(long volatile *, long, long); #pragma intrinsic(_InterlockedCompareExchange) FORCEINLINE int32_t PalInterlockedCompareExchange(_Inout_ int32_t volatile *pDst, int32_t iValue, int32_t iComparand) { return _InterlockedCompareExchange((long volatile *)pDst, iValue, iComparand); } EXTERN_C int64_t _InterlockedCompareExchange64(int64_t volatile *, int64_t, int64_t); #pragma intrinsic(_InterlockedCompareExchange64) FORCEINLINE int64_t PalInterlockedCompareExchange64(_Inout_ int64_t volatile *pDst, int64_t iValue, int64_t iComparand) { return _InterlockedCompareExchange64(pDst, iValue, iComparand); } #if defined(HOST_AMD64) || defined(HOST_ARM64) EXTERN_C uint8_t _InterlockedCompareExchange128(int64_t volatile *, int64_t, int64_t, int64_t *); #pragma intrinsic(_InterlockedCompareExchange128) FORCEINLINE uint8_t PalInterlockedCompareExchange128(_Inout_ int64_t volatile *pDst, int64_t iValueHigh, int64_t iValueLow, int64_t *pComparandAndResult) { return _InterlockedCompareExchange128(pDst, iValueHigh, iValueLow, pComparandAndResult); } #endif // HOST_AMD64 #ifdef HOST_64BIT EXTERN_C void * _InterlockedExchangePointer(void * volatile *, void *); #pragma intrinsic(_InterlockedExchangePointer) FORCEINLINE void * PalInterlockedExchangePointer(_Inout_ void * volatile *pDst, _In_ void *pValue) { return _InterlockedExchangePointer((void * volatile *)pDst, pValue); } EXTERN_C void * _InterlockedCompareExchangePointer(void * volatile *, void *, void *); #pragma intrinsic(_InterlockedCompareExchangePointer) FORCEINLINE void * PalInterlockedCompareExchangePointer(_Inout_ void * volatile *pDst, _In_ void *pValue, _In_ void *pComparand) { return _InterlockedCompareExchangePointer((void * volatile *)pDst, pValue, pComparand); } #else // HOST_64BIT #define PalInterlockedExchangePointer(_pDst, _pValue) \ ((void *)_InterlockedExchange((long volatile *)(_pDst), (long)(size_t)(_pValue))) #define PalInterlockedCompareExchangePointer(_pDst, _pValue, _pComparand) \ ((void *)_InterlockedCompareExchange((long volatile *)(_pDst), (long)(size_t)(_pValue), 
(long)(size_t)(_pComparand))) #endif // HOST_64BIT EXTERN_C __declspec(dllimport) unsigned long __stdcall GetLastError(); FORCEINLINE int PalGetLastError() { return (int)GetLastError(); } EXTERN_C __declspec(dllimport) void __stdcall SetLastError(unsigned long error); FORCEINLINE void PalSetLastError(int error) { SetLastError((unsigned long)error); } #if defined(HOST_X86) EXTERN_C void _mm_pause(); #pragma intrinsic(_mm_pause) #define PalYieldProcessor() _mm_pause() FORCEINLINE void PalMemoryBarrier() { long Barrier; _InterlockedOr(&Barrier, 0); } #elif defined(HOST_AMD64) EXTERN_C void _mm_pause(); #pragma intrinsic(_mm_pause) #define PalYieldProcessor() _mm_pause() EXTERN_C void __faststorefence(); #pragma intrinsic(__faststorefence) #define PalMemoryBarrier() __faststorefence() #elif defined(HOST_ARM) EXTERN_C void __yield(void); #pragma intrinsic(__yield) EXTERN_C void __dmb(unsigned int _Type); #pragma intrinsic(__dmb) FORCEINLINE void PalYieldProcessor() { __dmb(0xA /* _ARM_BARRIER_ISHST */); __yield(); } #define PalMemoryBarrier() __dmb(0xF /* _ARM_BARRIER_SY */) #elif defined(HOST_ARM64) EXTERN_C void __yield(void); #pragma intrinsic(__yield) EXTERN_C void __dmb(unsigned int _Type); #pragma intrinsic(__dmb) FORCEINLINE void PalYieldProcessor() { __dmb(0xA /* _ARM64_BARRIER_ISHST */); __yield(); } #define PalMemoryBarrier() __dmb(0xF /* _ARM64_BARRIER_SY */) #else #error Unsupported architecture #endif #define PalDebugBreak() __debugbreak()
-1
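The record above wraps the MSVC interlocked intrinsics behind PalInterlocked* helpers whose contract is to return the value observed before the exchange. As an illustration only, here is a minimal sketch of the compare-exchange retry loop such wrappers are typically used for; cas_int32 and add_saturating are hypothetical names, and C11 atomics stand in for the compiler intrinsics so the sketch is self-contained.

/* Minimal sketch (not from the repo): a clamped atomic add built as a
 * compare-exchange retry loop, mirroring the PalInterlockedCompareExchange
 * contract declared above (the call returns the value observed before the
 * exchange). C11 atomics stand in for the MSVC intrinsics. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static int32_t cas_int32(_Atomic int32_t *dst, int32_t value, int32_t comparand)
{
    /* Returns the previously observed value, like the PAL wrapper. */
    atomic_compare_exchange_strong(dst, &comparand, value);
    return comparand;
}

static int32_t add_saturating(_Atomic int32_t *dst, int32_t delta, int32_t limit)
{
    for (;;)
    {
        int32_t old_value = atomic_load(dst);
        int32_t new_value = old_value + delta;
        if (new_value > limit)
            new_value = limit;                      /* clamp instead of overshooting */
        if (cas_int32(dst, new_value, old_value) == old_value)
            return new_value;                       /* exchange succeeded */
        /* another thread updated the value first; retry with the fresh value */
    }
}

int main(void)
{
    _Atomic int32_t counter = 0;
    printf("%d\n", (int)add_saturating(&counter, 5, 3));   /* prints 3 */
    return 0;
}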
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/coreclr/pal/tests/palsuite/file_io/CopyFileA/test1/CopyFileA.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: CopyFileA.c ** ** Purpose: Tests the PAL implementation of the CopyFileA function ** ** **===================================================================*/ /* 1. copy an existing file to existing with overwrite true 2. copy an existing file to existing with overwrite false 3. copy an existing file to non-existant with overwrite true 4. copy an existing file to non-existant with overwrite false 5. copy non-existant file to existing with overwrite true 6. copy non-existant file to existing with overwrite false 7. copy non-existant file to non-existant with overwrite true 8. copy non-existant file to non-existant with overwrite false */ #include <palsuite.h> struct TESTS{ char* lpSource; char* lpDestination; BOOL bFailIfExists; int nResult; }; PALTEST(file_io_CopyFileA_test1_paltest_copyfilea_test1, "file_io/CopyFileA/test1/paltest_copyfilea_test1") { char szSrcExisting[] = {"src_existing.tmp"}; char szSrcNonExistant[] = {"src_non-existant.tmp"}; char szDstExisting[] = {"dst_existing.tmp"}; char szDstNonExistant[] = {"dst_non-existant.tmp"}; BOOL bRc = TRUE; BOOL bSuccess = TRUE; FILE* tempFile = NULL; int i; struct TESTS testCase[] = { {szSrcExisting, szDstExisting, FALSE, 1}, {szSrcExisting, szDstExisting, TRUE, 0}, {szSrcExisting, szDstNonExistant, FALSE, 1}, {szSrcExisting, szDstNonExistant, TRUE, 1}, {szSrcNonExistant, szDstExisting, FALSE, 0}, {szSrcNonExistant, szDstExisting, TRUE, 0}, {szSrcNonExistant, szDstNonExistant, FALSE, 0}, {szSrcNonExistant, szDstNonExistant, TRUE, 0} }; if (0 != PAL_Initialize(argc,argv)) { return FAIL; } /* create the src_existing file */ tempFile = fopen(szSrcExisting, "w"); if (tempFile != NULL) { fprintf(tempFile, "CopyFileA test file: src_existing.tmp\n"); fclose(tempFile); } else { Fail("CopyFileA: ERROR-> Couldn't create \"src_existing.tmp\" with " "error %ld\n", GetLastError()); } /* create the dst_existing file */ tempFile = fopen(szDstExisting, "w"); if (tempFile != NULL) { fprintf(tempFile, "CopyFileA test file: dst_existing.tmp\n"); fclose(tempFile); } else { Fail("CopyFileA: ERROR-> Couldn't create \"dst_existing.tmp\" with " "error %ld\n", GetLastError()); } for (i = 0; i < (sizeof(testCase) / sizeof(struct TESTS)); i++) { bRc = CopyFileA(testCase[i].lpSource, testCase[i].lpDestination, testCase[i].bFailIfExists); if (!bRc) { if (testCase[i].nResult == 1) { Trace("CopyFileA: FAILED: %s -> %s with bFailIfExists = %d " "with error %ld\n", testCase[i].lpSource, testCase[i].lpDestination, testCase[i].bFailIfExists, GetLastError()); bSuccess = FALSE; } } else { if (testCase[i].nResult == 0) { Trace("CopyFileA: FAILED: %s -> %s with bFailIfExists = %d\n", testCase[i].lpSource, testCase[i].lpDestination, testCase[i].bFailIfExists); bSuccess = FALSE; } else { /* verify the file was moved */ if (GetFileAttributesA(testCase[i].lpDestination) == -1) { Trace("CopyFileA: GetFileAttributes of destination file " "failed with error code %ld. \n", GetLastError()); bSuccess = FALSE; } else if (GetFileAttributesA(testCase[i].lpSource) == -1) { Trace("CopyFileA: GetFileAttributes of source file " "failed with error code %ld. 
\n", GetLastError()); bSuccess = FALSE; } else { /* verify attributes of destination file to source file*/ if(GetFileAttributes(testCase[i].lpSource) != GetFileAttributes(testCase[i].lpDestination)) { Trace("CopyFileA : The file attributes of the " "destination file do not match the file " "attributes of the source file.\n"); bSuccess = FALSE; } } } } /* delete file file but don't worry if it fails */ DeleteFileA(szDstNonExistant); } int exitCode = bSuccess ? PASS : FAIL; PAL_TerminateEx(exitCode); return exitCode; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: CopyFileA.c ** ** Purpose: Tests the PAL implementation of the CopyFileA function ** ** **===================================================================*/ /* 1. copy an existing file to existing with overwrite true 2. copy an existing file to existing with overwrite false 3. copy an existing file to non-existant with overwrite true 4. copy an existing file to non-existant with overwrite false 5. copy non-existant file to existing with overwrite true 6. copy non-existant file to existing with overwrite false 7. copy non-existant file to non-existant with overwrite true 8. copy non-existant file to non-existant with overwrite false */ #include <palsuite.h> struct TESTS{ char* lpSource; char* lpDestination; BOOL bFailIfExists; int nResult; }; PALTEST(file_io_CopyFileA_test1_paltest_copyfilea_test1, "file_io/CopyFileA/test1/paltest_copyfilea_test1") { char szSrcExisting[] = {"src_existing.tmp"}; char szSrcNonExistant[] = {"src_non-existant.tmp"}; char szDstExisting[] = {"dst_existing.tmp"}; char szDstNonExistant[] = {"dst_non-existant.tmp"}; BOOL bRc = TRUE; BOOL bSuccess = TRUE; FILE* tempFile = NULL; int i; struct TESTS testCase[] = { {szSrcExisting, szDstExisting, FALSE, 1}, {szSrcExisting, szDstExisting, TRUE, 0}, {szSrcExisting, szDstNonExistant, FALSE, 1}, {szSrcExisting, szDstNonExistant, TRUE, 1}, {szSrcNonExistant, szDstExisting, FALSE, 0}, {szSrcNonExistant, szDstExisting, TRUE, 0}, {szSrcNonExistant, szDstNonExistant, FALSE, 0}, {szSrcNonExistant, szDstNonExistant, TRUE, 0} }; if (0 != PAL_Initialize(argc,argv)) { return FAIL; } /* create the src_existing file */ tempFile = fopen(szSrcExisting, "w"); if (tempFile != NULL) { fprintf(tempFile, "CopyFileA test file: src_existing.tmp\n"); fclose(tempFile); } else { Fail("CopyFileA: ERROR-> Couldn't create \"src_existing.tmp\" with " "error %ld\n", GetLastError()); } /* create the dst_existing file */ tempFile = fopen(szDstExisting, "w"); if (tempFile != NULL) { fprintf(tempFile, "CopyFileA test file: dst_existing.tmp\n"); fclose(tempFile); } else { Fail("CopyFileA: ERROR-> Couldn't create \"dst_existing.tmp\" with " "error %ld\n", GetLastError()); } for (i = 0; i < (sizeof(testCase) / sizeof(struct TESTS)); i++) { bRc = CopyFileA(testCase[i].lpSource, testCase[i].lpDestination, testCase[i].bFailIfExists); if (!bRc) { if (testCase[i].nResult == 1) { Trace("CopyFileA: FAILED: %s -> %s with bFailIfExists = %d " "with error %ld\n", testCase[i].lpSource, testCase[i].lpDestination, testCase[i].bFailIfExists, GetLastError()); bSuccess = FALSE; } } else { if (testCase[i].nResult == 0) { Trace("CopyFileA: FAILED: %s -> %s with bFailIfExists = %d\n", testCase[i].lpSource, testCase[i].lpDestination, testCase[i].bFailIfExists); bSuccess = FALSE; } else { /* verify the file was moved */ if (GetFileAttributesA(testCase[i].lpDestination) == -1) { Trace("CopyFileA: GetFileAttributes of destination file " "failed with error code %ld. \n", GetLastError()); bSuccess = FALSE; } else if (GetFileAttributesA(testCase[i].lpSource) == -1) { Trace("CopyFileA: GetFileAttributes of source file " "failed with error code %ld. 
\n", GetLastError()); bSuccess = FALSE; } else { /* verify attributes of destination file to source file*/ if(GetFileAttributes(testCase[i].lpSource) != GetFileAttributes(testCase[i].lpDestination)) { Trace("CopyFileA : The file attributes of the " "destination file do not match the file " "attributes of the source file.\n"); bSuccess = FALSE; } } } } /* delete file file but don't worry if it fails */ DeleteFileA(szDstNonExistant); } int exitCode = bSuccess ? PASS : FAIL; PAL_TerminateEx(exitCode); return exitCode; }
-1
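The CopyFileA test above drives eight source/destination/overwrite combinations through a single loop over a test-case table. A minimal sketch of the same table-driven pattern, with a made-up function under test (is_even) and made-up expectations, looks like this:

/* Illustration only: the table-driven test shape used by the CopyFileA test,
 * reduced to a self-contained example with fabricated cases. */
#include <stddef.h>
#include <stdio.h>

static int is_even(int n) { return n % 2 == 0; }

struct test_case { int input; int expected; };

int main(void)
{
    struct test_case cases[] = { {0, 1}, {1, 0}, {2, 1}, {7, 0} };
    int failures = 0;

    for (size_t i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
    {
        int actual = is_even(cases[i].input);
        if (actual != cases[i].expected)
        {
            printf("FAILED: is_even(%d) == %d, expected %d\n",
                   cases[i].input, actual, cases[i].expected);
            failures++;
        }
    }
    return failures == 0 ? 0 : 1;
}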
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/native/eventpipe/ep-buffer.h
#ifndef __EVENTPIPE_BUFFER_H__ #define __EVENTPIPE_BUFFER_H__ #include "ep-rt-config.h" #ifdef ENABLE_PERFTRACING #include "ep-types.h" #include "ep-rt.h" #undef EP_IMPL_GETTER_SETTER #ifdef EP_IMPL_BUFFER_GETTER_SETTER #define EP_IMPL_GETTER_SETTER #endif #include "ep-getter-setter.h" /* * EventPipeBuffer. */ // Synchronization // // EventPipeBuffer starts off writable and accumulates events in a buffer, then at some point converts to be readable and a second thread can // read back the events which have accumulated. The transition occurs when calling convert_to_read_only (). Write methods will assert if the buffer // isn't writable and read-related methods will assert if it isn't readable. Methods that have no asserts should have immutable results that // can be used at any point during the buffer's lifetime. The buffer has no internal locks so it is the caller's responsibility to synchronize // their usage. // Writing into the buffer and calling convert_to_read_only() is always done with EventPipeThread rt_lock held. The eventual reader thread can do // a few different things to ensure it sees a consistent state: // 1) Take the writer's EventPipeThread rt_lock at least once after the last time the writer writes events // 2) Use a memory barrier that prevents reader loads from being re-ordered earlier, such as the one that will occur implicitly by evaluating // ep_buffer_get_volatile_state () // Instances of EventPipeEventInstance in the buffer must be 8-byte aligned. // It is OK for the data payloads to be unaligned because they are opaque blobs that are copied via memcpy. #define EP_BUFFER_ALIGNMENT_SIZE 8 #if defined(EP_INLINE_GETTER_SETTER) || defined(EP_IMPL_BUFFER_GETTER_SETTER) struct _EventPipeBuffer { #else struct _EventPipeBuffer_Internal { #endif // The timestamp the buffer was created. If our clock source // is monotonic then all events in the buffer should have // timestamp >= this one. If not then all bets are off. ep_timestamp_t creation_timestamp; // Thread that is/was allowed to write into this buffer when state == WRITABLE. EventPipeThread *writer_thread; // A pointer to the actual buffer. uint8_t *buffer; // The current write pointer. uint8_t *current; // The max write pointer (end of the buffer). uint8_t *limit; // Pointer to the current event being read. EventPipeEventInstance *current_read_event; // Each buffer will become part of a per-thread linked list of buffers. // The linked list is invasive, thus we declare the pointers here. 
EventPipeBuffer *prev_buffer; EventPipeBuffer *next_buffer; // State transition WRITABLE -> READ_ONLY only occurs while holding the writer_thread->rt_lock; // It can be read at any time volatile uint32_t state; // The sequence number corresponding to current_read_event // Prior to read iteration it is the sequence number of the first event in the buffer uint32_t event_sequence_number; }; #if !defined(EP_INLINE_GETTER_SETTER) && !defined(EP_IMPL_BUFFER_GETTER_SETTER) struct _EventPipeBuffer { uint8_t _internal [sizeof (struct _EventPipeBuffer_Internal)]; }; #endif EP_DEFINE_GETTER(EventPipeBuffer *, buffer, ep_timestamp_t, creation_timestamp) EP_DEFINE_GETTER(EventPipeBuffer *, buffer, uint8_t *, buffer) EP_DEFINE_GETTER(EventPipeBuffer *, buffer, uint8_t *, limit) EP_DEFINE_GETTER_REF(EventPipeBuffer *, buffer, volatile uint32_t *, state) EP_DEFINE_GETTER(EventPipeBuffer *, buffer, EventPipeBuffer *, prev_buffer) EP_DEFINE_SETTER(EventPipeBuffer *, buffer, EventPipeBuffer *, prev_buffer) EP_DEFINE_GETTER(EventPipeBuffer *, buffer, EventPipeBuffer *, next_buffer) EP_DEFINE_SETTER(EventPipeBuffer *, buffer, EventPipeBuffer *, next_buffer) EP_DEFINE_GETTER(EventPipeBuffer *, buffer, EventPipeThread *, writer_thread) EventPipeBuffer * ep_buffer_alloc ( uint32_t buffer_size, EventPipeThread *writer_thread, uint32_t event_sequence_number); void ep_buffer_free (EventPipeBuffer *buffer); static inline uint32_t ep_buffer_get_size (const EventPipeBuffer *buffer) { return (uint32_t)(ep_buffer_get_limit (buffer) - ep_buffer_get_buffer (buffer)); } static EP_ALWAYS_INLINE uint8_t * ep_buffer_get_next_aligned_address (const EventPipeBuffer *buffer, uint8_t *address) { EP_ASSERT (ep_buffer_get_buffer (buffer) <= address && address <= ep_buffer_get_limit (buffer)); address = (uint8_t *)EP_ALIGN_UP (address, EP_BUFFER_ALIGNMENT_SIZE); EP_ASSERT ((size_t)address % EP_BUFFER_ALIGNMENT_SIZE == 0); return address; } // Write an event to the buffer. // An optional stack trace can be provided for sample profiler events. // Otherwise, if a stack trace is needed, one will be automatically collected. // Returns: // - true: The write succeeded. // - false: The write failed. In this case, the buffer should be considered full. bool ep_buffer_write_event ( EventPipeBuffer *buffer, ep_rt_thread_handle_t thread, EventPipeSession *session, EventPipeEvent *ep_event, EventPipeEventPayload *payload, const uint8_t *activity_id, const uint8_t *related_activity_id, EventPipeStackContents *stack); // Advances read cursor to the next event or NULL if there aren't any more. When the // buffer is first made readable the cursor is automatically positioned on the first // event or NULL if there are no events in the buffer. void ep_buffer_move_next_read_event (EventPipeBuffer *buffer); // Returns the event at the current read cursor. The returned event pointer is valid // until the buffer is deleted. EventPipeEventInstance * ep_buffer_get_current_read_event (const EventPipeBuffer *buffer); // Gets the sequence number of the event corresponding to get_current_read_event (). uint32_t ep_buffer_get_current_sequence_number (const EventPipeBuffer *buffer); // Check the state of the buffer. EventPipeBufferState ep_buffer_get_volatile_state (const EventPipeBuffer *buffer); // Convert the buffer writable to readable. 
// _Requires_lock_held (thread) void ep_buffer_convert_to_read_only (EventPipeBuffer *buffer); #ifdef EP_CHECKED_BUILD bool ep_buffer_ensure_consistency (const EventPipeBuffer *buffer); #endif #endif /* ENABLE_PERFTRACING */ #endif /* __EVENTPIPE_BUFFER_H__ */
#ifndef __EVENTPIPE_BUFFER_H__ #define __EVENTPIPE_BUFFER_H__ #include "ep-rt-config.h" #ifdef ENABLE_PERFTRACING #include "ep-types.h" #include "ep-rt.h" #undef EP_IMPL_GETTER_SETTER #ifdef EP_IMPL_BUFFER_GETTER_SETTER #define EP_IMPL_GETTER_SETTER #endif #include "ep-getter-setter.h" /* * EventPipeBuffer. */ // Synchronization // // EventPipeBuffer starts off writable and accumulates events in a buffer, then at some point converts to be readable and a second thread can // read back the events which have accumulated. The transition occurs when calling convert_to_read_only (). Write methods will assert if the buffer // isn't writable and read-related methods will assert if it isn't readable. Methods that have no asserts should have immutable results that // can be used at any point during the buffer's lifetime. The buffer has no internal locks so it is the caller's responsibility to synchronize // their usage. // Writing into the buffer and calling convert_to_read_only() is always done with EventPipeThread rt_lock held. The eventual reader thread can do // a few different things to ensure it sees a consistent state: // 1) Take the writer's EventPipeThread rt_lock at least once after the last time the writer writes events // 2) Use a memory barrier that prevents reader loads from being re-ordered earlier, such as the one that will occur implicitly by evaluating // ep_buffer_get_volatile_state () // Instances of EventPipeEventInstance in the buffer must be 8-byte aligned. // It is OK for the data payloads to be unaligned because they are opaque blobs that are copied via memcpy. #define EP_BUFFER_ALIGNMENT_SIZE 8 #if defined(EP_INLINE_GETTER_SETTER) || defined(EP_IMPL_BUFFER_GETTER_SETTER) struct _EventPipeBuffer { #else struct _EventPipeBuffer_Internal { #endif // The timestamp the buffer was created. If our clock source // is monotonic then all events in the buffer should have // timestamp >= this one. If not then all bets are off. ep_timestamp_t creation_timestamp; // Thread that is/was allowed to write into this buffer when state == WRITABLE. EventPipeThread *writer_thread; // A pointer to the actual buffer. uint8_t *buffer; // The current write pointer. uint8_t *current; // The max write pointer (end of the buffer). uint8_t *limit; // Pointer to the current event being read. EventPipeEventInstance *current_read_event; // Each buffer will become part of a per-thread linked list of buffers. // The linked list is invasive, thus we declare the pointers here. 
EventPipeBuffer *prev_buffer; EventPipeBuffer *next_buffer; // State transition WRITABLE -> READ_ONLY only occurs while holding the writer_thread->rt_lock; // It can be read at any time volatile uint32_t state; // The sequence number corresponding to current_read_event // Prior to read iteration it is the sequence number of the first event in the buffer uint32_t event_sequence_number; }; #if !defined(EP_INLINE_GETTER_SETTER) && !defined(EP_IMPL_BUFFER_GETTER_SETTER) struct _EventPipeBuffer { uint8_t _internal [sizeof (struct _EventPipeBuffer_Internal)]; }; #endif EP_DEFINE_GETTER(EventPipeBuffer *, buffer, ep_timestamp_t, creation_timestamp) EP_DEFINE_GETTER(EventPipeBuffer *, buffer, uint8_t *, buffer) EP_DEFINE_GETTER(EventPipeBuffer *, buffer, uint8_t *, limit) EP_DEFINE_GETTER_REF(EventPipeBuffer *, buffer, volatile uint32_t *, state) EP_DEFINE_GETTER(EventPipeBuffer *, buffer, EventPipeBuffer *, prev_buffer) EP_DEFINE_SETTER(EventPipeBuffer *, buffer, EventPipeBuffer *, prev_buffer) EP_DEFINE_GETTER(EventPipeBuffer *, buffer, EventPipeBuffer *, next_buffer) EP_DEFINE_SETTER(EventPipeBuffer *, buffer, EventPipeBuffer *, next_buffer) EP_DEFINE_GETTER(EventPipeBuffer *, buffer, EventPipeThread *, writer_thread) EventPipeBuffer * ep_buffer_alloc ( uint32_t buffer_size, EventPipeThread *writer_thread, uint32_t event_sequence_number); void ep_buffer_free (EventPipeBuffer *buffer); static inline uint32_t ep_buffer_get_size (const EventPipeBuffer *buffer) { return (uint32_t)(ep_buffer_get_limit (buffer) - ep_buffer_get_buffer (buffer)); } static EP_ALWAYS_INLINE uint8_t * ep_buffer_get_next_aligned_address (const EventPipeBuffer *buffer, uint8_t *address) { EP_ASSERT (ep_buffer_get_buffer (buffer) <= address && address <= ep_buffer_get_limit (buffer)); address = (uint8_t *)EP_ALIGN_UP (address, EP_BUFFER_ALIGNMENT_SIZE); EP_ASSERT ((size_t)address % EP_BUFFER_ALIGNMENT_SIZE == 0); return address; } // Write an event to the buffer. // An optional stack trace can be provided for sample profiler events. // Otherwise, if a stack trace is needed, one will be automatically collected. // Returns: // - true: The write succeeded. // - false: The write failed. In this case, the buffer should be considered full. bool ep_buffer_write_event ( EventPipeBuffer *buffer, ep_rt_thread_handle_t thread, EventPipeSession *session, EventPipeEvent *ep_event, EventPipeEventPayload *payload, const uint8_t *activity_id, const uint8_t *related_activity_id, EventPipeStackContents *stack); // Advances read cursor to the next event or NULL if there aren't any more. When the // buffer is first made readable the cursor is automatically positioned on the first // event or NULL if there are no events in the buffer. void ep_buffer_move_next_read_event (EventPipeBuffer *buffer); // Returns the event at the current read cursor. The returned event pointer is valid // until the buffer is deleted. EventPipeEventInstance * ep_buffer_get_current_read_event (const EventPipeBuffer *buffer); // Gets the sequence number of the event corresponding to get_current_read_event (). uint32_t ep_buffer_get_current_sequence_number (const EventPipeBuffer *buffer); // Check the state of the buffer. EventPipeBufferState ep_buffer_get_volatile_state (const EventPipeBuffer *buffer); // Convert the buffer writable to readable. 
// _Requires_lock_held (thread) void ep_buffer_convert_to_read_only (EventPipeBuffer *buffer); #ifdef EP_CHECKED_BUILD bool ep_buffer_ensure_consistency (const EventPipeBuffer *buffer); #endif #endif /* ENABLE_PERFTRACING */ #endif /* __EVENTPIPE_BUFFER_H__ */
-1
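ep-buffer.h above spells out the reader side of the contract: once the writer has converted a buffer to read-only, the read cursor starts on the first event and is advanced with ep_buffer_move_next_read_event until ep_buffer_get_current_read_event returns NULL. A sketch of a reader drain loop written against those declarations might look as follows; drain_buffer and handle_event_fn are hypothetical names, and the caller is assumed to have satisfied the synchronization rules described in the header comment.

/* Illustration only: draining a readable EventPipeBuffer using the API
 * declared in ep-buffer.h above. The writer is assumed to have already
 * called ep_buffer_convert_to_read_only while holding its rt_lock. */
#include <stdint.h>
#include "ep-buffer.h"

typedef void (*handle_event_fn)(EventPipeEventInstance *instance, uint32_t sequence_number);

static void
drain_buffer (EventPipeBuffer *buffer, handle_event_fn handle_event)
{
	// After the buffer becomes readable the cursor sits on the first event (or NULL if empty).
	EventPipeEventInstance *instance = ep_buffer_get_current_read_event (buffer);
	while (instance != NULL) {
		handle_event (instance, ep_buffer_get_current_sequence_number (buffer));
		// Advance the cursor; get_current_read_event returns NULL once exhausted.
		ep_buffer_move_next_read_event (buffer);
		instance = ep_buffer_get_current_read_event (buffer);
	}
}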
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/coreclr/inc/jiteeversionguid.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. ////////////////////////////////////////////////////////////////////////////////////////////////////////// // // NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE // // #JITEEVersionIdentifier // // This GUID represents the version of the JIT/EE interface. Any time the interface between the JIT and // the EE changes (by adding or removing methods to any interface shared between them), this GUID should // be changed. This is the identifier verified by ICorJitCompiler::getVersionIdentifier(). // // You can use "uuidgen.exe -s" to generate this value. // // Note that this file is parsed by some tools, namely superpmi.py, so make sure the first line is exactly // of the form: // // constexpr GUID JITEEVersionIdentifier = { /* 1776ab48-edfa-49be-a11f-ec216b28174c */ // // (without the leading slashes or spaces). // // See docs/project/updating-jitinterface.md for details // // **** NOTE TO INTEGRATORS: // // If there is a merge conflict here, because the version changed in two different places, you must // create a **NEW** GUID, not simply choose one or the other! // // NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE // ////////////////////////////////////////////////////////////////////////////////////////////////////////// // #ifndef GUID_DEFINED typedef struct _GUID { uint32_t Data1; // NOTE: diff from Win32, for LP64 uint16_t Data2; uint16_t Data3; uint8_t Data4[ 8 ]; } GUID; typedef const GUID *LPCGUID; #define GUID_DEFINED #endif // !GUID_DEFINED constexpr GUID JITEEVersionIdentifier = { /* bcc99ca6-5291-4cc0-a5d9-2758456053f3 */ 0xbcc99ca6, 0x5291, 0x4cc0, { 0xa5, 0xd9, 0x27, 0x58, 0x45, 0x60, 0x53, 0xf3 } }; ////////////////////////////////////////////////////////////////////////////////////////////////////////// // // END JITEEVersionIdentifier // //////////////////////////////////////////////////////////////////////////////////////////////////////////
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. ////////////////////////////////////////////////////////////////////////////////////////////////////////// // // NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE // // #JITEEVersionIdentifier // // This GUID represents the version of the JIT/EE interface. Any time the interface between the JIT and // the EE changes (by adding or removing methods to any interface shared between them), this GUID should // be changed. This is the identifier verified by ICorJitCompiler::getVersionIdentifier(). // // You can use "uuidgen.exe -s" to generate this value. // // Note that this file is parsed by some tools, namely superpmi.py, so make sure the first line is exactly // of the form: // // constexpr GUID JITEEVersionIdentifier = { /* 1776ab48-edfa-49be-a11f-ec216b28174c */ // // (without the leading slashes or spaces). // // See docs/project/updating-jitinterface.md for details // // **** NOTE TO INTEGRATORS: // // If there is a merge conflict here, because the version changed in two different places, you must // create a **NEW** GUID, not simply choose one or the other! // // NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE // ////////////////////////////////////////////////////////////////////////////////////////////////////////// // #ifndef GUID_DEFINED typedef struct _GUID { uint32_t Data1; // NOTE: diff from Win32, for LP64 uint16_t Data2; uint16_t Data3; uint8_t Data4[ 8 ]; } GUID; typedef const GUID *LPCGUID; #define GUID_DEFINED #endif // !GUID_DEFINED constexpr GUID JITEEVersionIdentifier = { /* bcc99ca6-5291-4cc0-a5d9-2758456053f3 */ 0xbcc99ca6, 0x5291, 0x4cc0, { 0xa5, 0xd9, 0x27, 0x58, 0x45, 0x60, 0x53, 0xf3 } }; ////////////////////////////////////////////////////////////////////////////////////////////////////////// // // END JITEEVersionIdentifier // //////////////////////////////////////////////////////////////////////////////////////////////////////////
-1
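jiteeversionguid.h above defines the GUID layout and the current JIT/EE interface identifier. As a hedged illustration of the version check it feeds (the real check happens through ICorJitCompiler::getVersionIdentifier()), a GUID comparison could be sketched like this; guids_equal is a hypothetical helper and the "reported" value is fabricated for the example.

/* Illustration only: comparing two JIT/EE interface GUIDs. The GUID layout
 * and the expected value are taken from the header above; everything else
 * is made up for the sketch. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

typedef struct _GUID {
    uint32_t Data1;
    uint16_t Data2;
    uint16_t Data3;
    uint8_t  Data4[8];
} GUID;

static int guids_equal(const GUID *a, const GUID *b)
{
    /* No padding between members on common ABIs, so memcmp is sufficient;
     * a field-by-field comparison would work just as well. */
    return memcmp(a, b, sizeof(GUID)) == 0;
}

int main(void)
{
    const GUID expected = { 0xbcc99ca6, 0x5291, 0x4cc0,
                            { 0xa5, 0xd9, 0x27, 0x58, 0x45, 0x60, 0x53, 0xf3 } };
    GUID reported = expected;                /* pretend the JIT reported this value */

    puts(guids_equal(&expected, &reported)
             ? "JIT/EE versions match"
             : "JIT/EE version mismatch");
    return 0;
}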
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/coreclr/pal/tests/palsuite/c_runtime/_vsnwprintf_s/test4/test4.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test4.c ** ** Purpose: Test #4 for the _vsnwprintf_s function. ** ** **===================================================================*/ #include <palsuite.h> #include "../_vsnwprintf_s.h" /* memcmp is used to verify the results, so this test is dependent on it. */ /* ditto with wcslen */ static void DoPointerTest(WCHAR *formatstr, void* param, WCHAR* paramstr, WCHAR *checkstr1) { WCHAR buf[256] = { 0 }; TestVsnwprintf_s(buf, 256, formatstr, param); if (memcmp(buf, checkstr1, wcslen(checkstr1) + 2) != 0) { Fail("ERROR: failed to insert pointer to %#p into \"%s\"\n" "Expected \"%s\" got \"%s\".\n", paramstr, convertC(formatstr), convertC(checkstr1), convertC(buf)); } } static void DoI64DoubleTest(WCHAR *formatstr, INT64 value, WCHAR *valuestr, WCHAR *checkstr1) { WCHAR buf[256] = { 0 }; TestVsnwprintf_s(buf, 256, formatstr, value); if (memcmp(buf, checkstr1, wcslen(checkstr1) + 2) != 0) { Fail("ERROR: failed to insert %s into \"%s\"\n" "Expected \"%s\", got \"%s\".\n", value, convertC(formatstr), convertC(checkstr1), convertC(buf)); } } PALTEST(c_runtime__vsnwprintf_s_test4_paltest_vsnwprintf_test4, "c_runtime/_vsnwprintf_s/test4/paltest_vsnwprintf_test4") { void *ptr = (void*) 0x123456; INT64 lptr = I64(0x1234567887654321); if (PAL_Initialize(argc, argv) != 0) { return(FAIL); } /* ** Run only on 64 bit platforms */ #if defined(HOST_64BIT) Trace("Testing for 64 Bit Platforms \n"); DoPointerTest(convert("%p"), NULL, convert("NULL"), convert("00000000")); DoPointerTest(convert("%p"), ptr, convert("pointer to 0x123456"), convert("0000000000123456")); DoPointerTest(convert("%17p"), ptr, convert("pointer to 0x123456"), convert(" 0000000000123456")); DoPointerTest(convert("%17p"), ptr, convert("pointer to 0x123456"), convert(" 0000000000123456")); DoPointerTest(convert("%-17p"), ptr, convert("pointer to 0x123456"), convert("0000000000123456 ")); DoPointerTest(convert("%+p"), ptr, convert("pointer to 0x123456"), convert("0000000000123456")); DoPointerTest(convert("%#p"), ptr, convert("pointer to 0x123456"), convert("0X0000000000123456")); DoPointerTest(convert("%lp"), ptr, convert("pointer to 0x123456"), convert("00123456")); DoPointerTest(convert("%hp"), ptr, convert("pointer to 0x123456"), convert("00003456")); DoPointerTest(convert("%Lp"), ptr, convert("pointer to 0x123456"), convert("00123456")); DoI64DoubleTest(convert("%I64p"), lptr, convert("pointer to 0x1234567887654321"), convert("1234567887654321")); #else Trace("Testing for Non 64 Bit Platforms \n"); DoPointerTest(convert("%p"), NULL, convert("NULL"), convert("00000000")); DoPointerTest(convert("%p"), ptr, convert("pointer to 0x123456"), convert("00123456")); DoPointerTest(convert("%9p"), ptr, convert("pointer to 0x123456"), convert(" 00123456")); DoPointerTest(convert("%09p"), ptr, convert("pointer to 0x123456"), convert(" 00123456")); DoPointerTest(convert("%-9p"), ptr, convert("pointer to 0x123456"), convert("00123456 ")); DoPointerTest(convert("%+p"), ptr, convert("pointer to 0x123456"), convert("00123456")); DoPointerTest(convert("%#p"), ptr, convert("pointer to 0x123456"), convert("0X00123456")); DoPointerTest(convert("%lp"), ptr, convert("pointer to 0x123456"), convert("00123456")); DoPointerTest(convert("%hp"), ptr, convert("pointer to 0x123456"), convert("00003456")); DoPointerTest(convert("%Lp"), ptr, 
convert("pointer to 0x123456"), convert("00123456")); DoI64DoubleTest(convert("%I64p"), lptr, convert("pointer to 0x1234567887654321"), convert("1234567887654321")); #endif PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test4.c ** ** Purpose: Test #4 for the _vsnwprintf_s function. ** ** **===================================================================*/ #include <palsuite.h> #include "../_vsnwprintf_s.h" /* memcmp is used to verify the results, so this test is dependent on it. */ /* ditto with wcslen */ static void DoPointerTest(WCHAR *formatstr, void* param, WCHAR* paramstr, WCHAR *checkstr1) { WCHAR buf[256] = { 0 }; TestVsnwprintf_s(buf, 256, formatstr, param); if (memcmp(buf, checkstr1, wcslen(checkstr1) + 2) != 0) { Fail("ERROR: failed to insert pointer to %#p into \"%s\"\n" "Expected \"%s\" got \"%s\".\n", paramstr, convertC(formatstr), convertC(checkstr1), convertC(buf)); } } static void DoI64DoubleTest(WCHAR *formatstr, INT64 value, WCHAR *valuestr, WCHAR *checkstr1) { WCHAR buf[256] = { 0 }; TestVsnwprintf_s(buf, 256, formatstr, value); if (memcmp(buf, checkstr1, wcslen(checkstr1) + 2) != 0) { Fail("ERROR: failed to insert %s into \"%s\"\n" "Expected \"%s\", got \"%s\".\n", value, convertC(formatstr), convertC(checkstr1), convertC(buf)); } } PALTEST(c_runtime__vsnwprintf_s_test4_paltest_vsnwprintf_test4, "c_runtime/_vsnwprintf_s/test4/paltest_vsnwprintf_test4") { void *ptr = (void*) 0x123456; INT64 lptr = I64(0x1234567887654321); if (PAL_Initialize(argc, argv) != 0) { return(FAIL); } /* ** Run only on 64 bit platforms */ #if defined(HOST_64BIT) Trace("Testing for 64 Bit Platforms \n"); DoPointerTest(convert("%p"), NULL, convert("NULL"), convert("00000000")); DoPointerTest(convert("%p"), ptr, convert("pointer to 0x123456"), convert("0000000000123456")); DoPointerTest(convert("%17p"), ptr, convert("pointer to 0x123456"), convert(" 0000000000123456")); DoPointerTest(convert("%17p"), ptr, convert("pointer to 0x123456"), convert(" 0000000000123456")); DoPointerTest(convert("%-17p"), ptr, convert("pointer to 0x123456"), convert("0000000000123456 ")); DoPointerTest(convert("%+p"), ptr, convert("pointer to 0x123456"), convert("0000000000123456")); DoPointerTest(convert("%#p"), ptr, convert("pointer to 0x123456"), convert("0X0000000000123456")); DoPointerTest(convert("%lp"), ptr, convert("pointer to 0x123456"), convert("00123456")); DoPointerTest(convert("%hp"), ptr, convert("pointer to 0x123456"), convert("00003456")); DoPointerTest(convert("%Lp"), ptr, convert("pointer to 0x123456"), convert("00123456")); DoI64DoubleTest(convert("%I64p"), lptr, convert("pointer to 0x1234567887654321"), convert("1234567887654321")); #else Trace("Testing for Non 64 Bit Platforms \n"); DoPointerTest(convert("%p"), NULL, convert("NULL"), convert("00000000")); DoPointerTest(convert("%p"), ptr, convert("pointer to 0x123456"), convert("00123456")); DoPointerTest(convert("%9p"), ptr, convert("pointer to 0x123456"), convert(" 00123456")); DoPointerTest(convert("%09p"), ptr, convert("pointer to 0x123456"), convert(" 00123456")); DoPointerTest(convert("%-9p"), ptr, convert("pointer to 0x123456"), convert("00123456 ")); DoPointerTest(convert("%+p"), ptr, convert("pointer to 0x123456"), convert("00123456")); DoPointerTest(convert("%#p"), ptr, convert("pointer to 0x123456"), convert("0X00123456")); DoPointerTest(convert("%lp"), ptr, convert("pointer to 0x123456"), convert("00123456")); DoPointerTest(convert("%hp"), ptr, convert("pointer to 0x123456"), convert("00003456")); DoPointerTest(convert("%Lp"), ptr, 
convert("pointer to 0x123456"), convert("00123456")); DoI64DoubleTest(convert("%I64p"), lptr, convert("pointer to 0x1234567887654321"), convert("1234567887654321")); #endif PAL_Terminate(); return PASS; }
-1
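The expected strings in the test above assume the PAL formats %p as zero-filled hexadecimal sized to the pointer width (8 digits on 32-bit hosts, 16 on 64-bit). Standard C leaves %p output implementation-defined, so a portable sketch of that behaviour formats the address explicitly; format_pointer is a hypothetical helper.

/* Illustration only: fixed-width, zero-filled pointer formatting, matching
 * the shape of the strings the test above expects from the PAL's %p. */
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

static void format_pointer(char *buf, size_t len, const void *p)
{
    int digits = (int)(sizeof(void *) * 2);          /* two hex digits per byte */
    snprintf(buf, len, "%0*" PRIXPTR, digits, (uintptr_t)p);
}

int main(void)
{
    char buf[32];
    format_pointer(buf, sizeof(buf), (void *)0x123456);
    puts(buf);   /* "0000000000123456" on 64-bit, "00123456" on 32-bit */
    return 0;
}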
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/coreclr/pal/tests/palsuite/threading/WaitForSingleObject/test1/test1.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Source: test1.c ** ** Purpose: Test for WaitForSingleObjectTest. Create two events, one ** with a TRUE and one with FALSE intial state. Ensure that WaitForSingle ** returns correct values for each of these. ** ** **=========================================================*/ #include <palsuite.h> BOOL WaitForSingleObjectTest() { BOOL bRet = FALSE; DWORD dwRet = 0; LPSECURITY_ATTRIBUTES lpEventAttributes = 0; BOOL bManualReset = TRUE; BOOL bInitialState = TRUE; HANDLE hEvent; /* Create an event, and ensure the HANDLE is valid */ hEvent = CreateEvent(lpEventAttributes, bManualReset, bInitialState, NULL); if (hEvent != INVALID_HANDLE_VALUE) { /* Call WaitForSingleObject with 0 time on the event. It should return WAIT_OBJECT_0 */ dwRet = WaitForSingleObject(hEvent,0); if (dwRet != WAIT_OBJECT_0) { Trace("WaitForSingleObjectTest:WaitForSingleObject failed (%x)\n", GetLastError()); } else { bRet = CloseHandle(hEvent); if (!bRet) { Trace("WaitForSingleObjectTest:CloseHandle failed (%x)\n", GetLastError()); } } } else { Trace("WaitForSingleObjectTest:CreateEvent failed (%x)\n", GetLastError()); } /* If the first section passed, Create another event, with the intial state being FALSE this time. */ if (bRet) { bRet = FALSE; bInitialState = FALSE; hEvent = CreateEvent( lpEventAttributes, bManualReset, bInitialState, NULL); if (hEvent != INVALID_HANDLE_VALUE) { /* Test WaitForSingleObject and ensure that it returns WAIT_TIMEOUT in this case. */ dwRet = WaitForSingleObject(hEvent,0); if (dwRet != WAIT_TIMEOUT) { Trace("WaitForSingleObjectTest:WaitForSingleObject failed (%x)\n", GetLastError()); } else { bRet = CloseHandle(hEvent); if (!bRet) { Trace("WaitForSingleObjectTest:CloseHandle failed (%x)\n", GetLastError()); } } } else { Trace("WaitForSingleObjectTest::CreateEvent failed (%x)\n", GetLastError()); } } return bRet; } PALTEST(threading_WaitForSingleObject_test1_paltest_waitforsingleobject_test1, "threading/WaitForSingleObject/test1/paltest_waitforsingleobject_test1") { if(0 != (PAL_Initialize(argc, argv))) { return ( FAIL ); } if(!WaitForSingleObjectTest()) { Fail ("Test failed\n"); } PAL_Terminate(); return ( PASS ); }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Source: test1.c ** ** Purpose: Test for WaitForSingleObjectTest. Create two events, one ** with a TRUE and one with FALSE intial state. Ensure that WaitForSingle ** returns correct values for each of these. ** ** **=========================================================*/ #include <palsuite.h> BOOL WaitForSingleObjectTest() { BOOL bRet = FALSE; DWORD dwRet = 0; LPSECURITY_ATTRIBUTES lpEventAttributes = 0; BOOL bManualReset = TRUE; BOOL bInitialState = TRUE; HANDLE hEvent; /* Create an event, and ensure the HANDLE is valid */ hEvent = CreateEvent(lpEventAttributes, bManualReset, bInitialState, NULL); if (hEvent != INVALID_HANDLE_VALUE) { /* Call WaitForSingleObject with 0 time on the event. It should return WAIT_OBJECT_0 */ dwRet = WaitForSingleObject(hEvent,0); if (dwRet != WAIT_OBJECT_0) { Trace("WaitForSingleObjectTest:WaitForSingleObject failed (%x)\n", GetLastError()); } else { bRet = CloseHandle(hEvent); if (!bRet) { Trace("WaitForSingleObjectTest:CloseHandle failed (%x)\n", GetLastError()); } } } else { Trace("WaitForSingleObjectTest:CreateEvent failed (%x)\n", GetLastError()); } /* If the first section passed, Create another event, with the intial state being FALSE this time. */ if (bRet) { bRet = FALSE; bInitialState = FALSE; hEvent = CreateEvent( lpEventAttributes, bManualReset, bInitialState, NULL); if (hEvent != INVALID_HANDLE_VALUE) { /* Test WaitForSingleObject and ensure that it returns WAIT_TIMEOUT in this case. */ dwRet = WaitForSingleObject(hEvent,0); if (dwRet != WAIT_TIMEOUT) { Trace("WaitForSingleObjectTest:WaitForSingleObject failed (%x)\n", GetLastError()); } else { bRet = CloseHandle(hEvent); if (!bRet) { Trace("WaitForSingleObjectTest:CloseHandle failed (%x)\n", GetLastError()); } } } else { Trace("WaitForSingleObjectTest::CreateEvent failed (%x)\n", GetLastError()); } } return bRet; } PALTEST(threading_WaitForSingleObject_test1_paltest_waitforsingleobject_test1, "threading/WaitForSingleObject/test1/paltest_waitforsingleobject_test1") { if(0 != (PAL_Initialize(argc, argv))) { return ( FAIL ); } if(!WaitForSingleObjectTest()) { Fail ("Test failed\n"); } PAL_Terminate(); return ( PASS ); }
-1
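The WaitForSingleObject test above checks the two zero-timeout outcomes: WAIT_OBJECT_0 for a signalled event and WAIT_TIMEOUT for an unsignalled one. A standalone, Windows-only sketch of the same pattern against the Win32 event API (which the PAL mirrors) could look like this:

/* Illustration only: signalled vs. unsignalled zero-timeout waits on an
 * event object. Windows-only; error handling kept minimal. */
#include <windows.h>
#include <stdio.h>

int main(void)
{
    /* Manual-reset event created already signalled: a zero-timeout wait succeeds. */
    HANDLE event = CreateEventW(NULL, TRUE, TRUE, NULL);
    if (event == NULL)
    {
        printf("CreateEventW failed (%lu)\n", GetLastError());
        return 1;
    }

    DWORD result = WaitForSingleObject(event, 0);
    puts(result == WAIT_OBJECT_0 ? "signalled" : "unexpected wait result");

    /* Reset it: the same zero-timeout wait now times out instead. */
    ResetEvent(event);
    result = WaitForSingleObject(event, 0);
    puts(result == WAIT_TIMEOUT ? "not signalled" : "unexpected wait result");

    CloseHandle(event);
    return 0;
}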
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/coreclr/utilcode/check.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //================================================================================ // Assertion checking infrastructure //================================================================================ #include "stdafx.h" #include <check.h> #include <sstring.h> #include <ex.h> #include <contract.h> #ifdef _DEBUG size_t CHECK::s_cLeakedBytes = 0; size_t CHECK::s_cNumFailures = 0; thread_local LONG CHECK::t_count; #endif BOOL CHECK::s_neverEnforceAsserts = 0; // Currently used for scan SPECIAL_HOLDER_* trickery DEBUG_NOINLINE BOOL CHECK::EnforceAssert_StaticCheckOnly() { return s_neverEnforceAsserts; } #ifdef ENABLE_CONTRACTS_IMPL // Need a place to stick this, there is no contract.cpp... BOOL BaseContract::s_alwaysEnforceContracts = 1; #define SPECIALIZE_CONTRACT_VIOLATION_HOLDER(mask) \ template<> void ContractViolationHolder<mask>::Enter() \ { \ SCAN_SCOPE_BEGIN; \ ANNOTATION_VIOLATION(mask); \ EnterInternal(mask); \ }; #define SPECIALIZE_AUTO_CLEANUP_CONTRACT_VIOLATION_HOLDER(mask) \ template<> AutoCleanupContractViolationHolder<mask>::AutoCleanupContractViolationHolder(BOOL fEnterViolation) \ { \ SCAN_SCOPE_BEGIN; \ ANNOTATION_VIOLATION(mask); \ EnterInternal(fEnterViolation ? mask : 0); \ }; #define SPECIALIZED_VIOLATION(mask) \ SPECIALIZE_CONTRACT_VIOLATION_HOLDER(mask); \ SPECIALIZE_AUTO_CLEANUP_CONTRACT_VIOLATION_HOLDER(mask) // There is a special case that requires 0... Why??? Who knows, let's fix that case. SPECIALIZED_VIOLATION(0); // Basic Specializations SPECIALIZED_VIOLATION(AllViolation); SPECIALIZED_VIOLATION(ThrowsViolation); SPECIALIZED_VIOLATION(GCViolation); SPECIALIZED_VIOLATION(ModeViolation); SPECIALIZED_VIOLATION(FaultViolation); SPECIALIZED_VIOLATION(FaultNotFatal); SPECIALIZED_VIOLATION(HostViolation); SPECIALIZED_VIOLATION(TakesLockViolation); SPECIALIZED_VIOLATION(LoadsTypeViolation); // Other Specializations used by the RUNTIME, if you get a compile time error you need // to add the specific specialization that you are using here. 
SPECIALIZED_VIOLATION(ThrowsViolation|GCViolation); SPECIALIZED_VIOLATION(ThrowsViolation|GCViolation|TakesLockViolation); SPECIALIZED_VIOLATION(ThrowsViolation|ModeViolation); SPECIALIZED_VIOLATION(ThrowsViolation|FaultNotFatal); SPECIALIZED_VIOLATION(ThrowsViolation|FaultViolation); SPECIALIZED_VIOLATION(ThrowsViolation|TakesLockViolation); SPECIALIZED_VIOLATION(ThrowsViolation|FaultViolation|TakesLockViolation); SPECIALIZED_VIOLATION(ThrowsViolation|FaultViolation|GCViolation); SPECIALIZED_VIOLATION(ThrowsViolation|FaultViolation|GCViolation|TakesLockViolation|LoadsTypeViolation); SPECIALIZED_VIOLATION(ThrowsViolation|FaultViolation|GCViolation|ModeViolation); SPECIALIZED_VIOLATION(ThrowsViolation|FaultViolation|GCViolation|ModeViolation|FaultNotFatal); SPECIALIZED_VIOLATION(ThrowsViolation|FaultViolation|GCViolation|ModeViolation|FaultNotFatal|TakesLockViolation); SPECIALIZED_VIOLATION(GCViolation|FaultViolation); SPECIALIZED_VIOLATION(GCViolation|FaultNotFatal|ModeViolation); SPECIALIZED_VIOLATION(GCViolation|FaultNotFatal|TakesLockViolation); SPECIALIZED_VIOLATION(GCViolation|FaultNotFatal|TakesLockViolation|ModeViolation); SPECIALIZED_VIOLATION(GCViolation|ModeViolation); SPECIALIZED_VIOLATION(FaultViolation|FaultNotFatal); SPECIALIZED_VIOLATION(FaultNotFatal|TakesLockViolation); #undef SPECIALIZED_VIOLATION #undef SPECIALIZE_AUTO_CLEANUP_CONTRACT_VIOLATION_HOLDER #undef SPECIALIZE_CONTRACT_VIOLATION_HOLDER #endif // Trigger triggers the actual check failure. The trigger may provide a reason // to include in the failure message. void CHECK::Trigger(LPCSTR reason) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; const char *messageString = NULL; NewHolder<StackScratchBuffer> pScratch(NULL); NewHolder<StackSString> pMessage(NULL); EX_TRY { FAULT_NOT_FATAL(); pScratch = new StackScratchBuffer(); pMessage = new StackSString(); pMessage->AppendASCII(reason); pMessage->AppendASCII(": "); if (m_message != NULL) pMessage->AppendASCII((m_message != (LPCSTR)1) ? m_message : "<runtime check failure>"); #if _DEBUG pMessage->AppendASCII("FAILED: "); pMessage->AppendASCII(m_condition); #endif messageString = pMessage->GetANSI(*pScratch); } EX_CATCH { messageString = "<exception occurred while building failure description>"; } EX_END_CATCH(SwallowAllExceptions); #if _DEBUG DbgAssertDialog((char*)m_file, m_line, (char *)messageString); #else OutputDebugStringA(messageString); DebugBreak(); #endif } #ifdef _DEBUG // Setup records context info after a failure. void CHECK::Setup(LPCSTR message, LPCSTR condition, LPCSTR file, INT line) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_SUPPORTS_DAC_HOST_ONLY; // // It might be nice to collect all of the message here. But for now, we will just // retain the innermost one. // if (m_message == NULL) { m_message = message; m_condition = condition; m_file = file; m_line = line; } #ifdef _DEBUG else if (IsInAssert()) { EX_TRY { FAULT_NOT_FATAL(); // Try to build a stack of condition failures StackSString context; context.Printf("%s\n\t%s%s FAILED: %s\n\t\t%s, line: %d", m_condition, message && *message ? message : "", message && *message ? ": " : "", condition, file, line); m_condition = AllocateDynamicMessage(context); } EX_CATCH { // If anything goes wrong, we don't push extra context } EX_END_CATCH(SwallowAllExceptions) } #endif #if defined(_DEBUG_IMPL) if (IsInAssert() && IsDebuggerPresent()) { DebugBreak(); } #endif } LPCSTR CHECK::FormatMessage(LPCSTR messageFormat, ...) 
{ STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; LPCSTR result = NULL; // We never delete this allocated string. A dtor would conflict with places // we use this around SEH stuff. We could have some fancy stack-based allocator, // but that's too much work for now. In fact we believe that leaking is a reasonable // policy, since allocations will only happen on a failed assert, and a failed assert // will generally be fatal to the process. // The most common place for format strings will be in an assert; in // which case we don't care about leaking. // But to be safe, if we're not-inside an assert, then we'll just use // the format string literal to avoid allocated (and leaking) any memory. CHECK chk; if (!chk.IsInAssert()) result = messageFormat; else { // This path is only run in debug. TakesLockViolation suppresses // problems with SString below. CONTRACT_VIOLATION(FaultNotFatal|TakesLockViolation); EX_TRY { SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE; // Format it. va_list args; va_start( args, messageFormat); SString s; s.VPrintf(messageFormat, args); va_end(args); result = AllocateDynamicMessage(s); } EX_CATCH { // If anything goes wrong, just use the format string. result = messageFormat; } EX_END_CATCH(SwallowAllExceptions) } return result; } LPCSTR CHECK::AllocateDynamicMessage(const SString &s) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; // Make a copy of it. StackScratchBuffer buffer; const char * pMsg = s.GetANSI(buffer); // Must copy that into our own field. size_t len = strlen(pMsg) + 1; char * p = new char[len]; strcpy(p, pMsg); // But we'll keep counters of how much we're leaking for diagnostic purposes. s_cLeakedBytes += len; s_cNumFailures ++; // This should never fire. // Note use an ASSERTE (not a check) to avoid a recursive deadlock. _ASSERTE(s_cLeakedBytes < 10000 || !"Warning - check misuse - leaked over 10,000B due to unexpected usage pattern"); return p; } #endif
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //================================================================================ // Assertion checking infrastructure //================================================================================ #include "stdafx.h" #include <check.h> #include <sstring.h> #include <ex.h> #include <contract.h> #ifdef _DEBUG size_t CHECK::s_cLeakedBytes = 0; size_t CHECK::s_cNumFailures = 0; thread_local LONG CHECK::t_count; #endif BOOL CHECK::s_neverEnforceAsserts = 0; // Currently used for scan SPECIAL_HOLDER_* trickery DEBUG_NOINLINE BOOL CHECK::EnforceAssert_StaticCheckOnly() { return s_neverEnforceAsserts; } #ifdef ENABLE_CONTRACTS_IMPL // Need a place to stick this, there is no contract.cpp... BOOL BaseContract::s_alwaysEnforceContracts = 1; #define SPECIALIZE_CONTRACT_VIOLATION_HOLDER(mask) \ template<> void ContractViolationHolder<mask>::Enter() \ { \ SCAN_SCOPE_BEGIN; \ ANNOTATION_VIOLATION(mask); \ EnterInternal(mask); \ }; #define SPECIALIZE_AUTO_CLEANUP_CONTRACT_VIOLATION_HOLDER(mask) \ template<> AutoCleanupContractViolationHolder<mask>::AutoCleanupContractViolationHolder(BOOL fEnterViolation) \ { \ SCAN_SCOPE_BEGIN; \ ANNOTATION_VIOLATION(mask); \ EnterInternal(fEnterViolation ? mask : 0); \ }; #define SPECIALIZED_VIOLATION(mask) \ SPECIALIZE_CONTRACT_VIOLATION_HOLDER(mask); \ SPECIALIZE_AUTO_CLEANUP_CONTRACT_VIOLATION_HOLDER(mask) // There is a special case that requires 0... Why??? Who knows, let's fix that case. SPECIALIZED_VIOLATION(0); // Basic Specializations SPECIALIZED_VIOLATION(AllViolation); SPECIALIZED_VIOLATION(ThrowsViolation); SPECIALIZED_VIOLATION(GCViolation); SPECIALIZED_VIOLATION(ModeViolation); SPECIALIZED_VIOLATION(FaultViolation); SPECIALIZED_VIOLATION(FaultNotFatal); SPECIALIZED_VIOLATION(HostViolation); SPECIALIZED_VIOLATION(TakesLockViolation); SPECIALIZED_VIOLATION(LoadsTypeViolation); // Other Specializations used by the RUNTIME, if you get a compile time error you need // to add the specific specialization that you are using here. 
SPECIALIZED_VIOLATION(ThrowsViolation|GCViolation); SPECIALIZED_VIOLATION(ThrowsViolation|GCViolation|TakesLockViolation); SPECIALIZED_VIOLATION(ThrowsViolation|ModeViolation); SPECIALIZED_VIOLATION(ThrowsViolation|FaultNotFatal); SPECIALIZED_VIOLATION(ThrowsViolation|FaultViolation); SPECIALIZED_VIOLATION(ThrowsViolation|TakesLockViolation); SPECIALIZED_VIOLATION(ThrowsViolation|FaultViolation|TakesLockViolation); SPECIALIZED_VIOLATION(ThrowsViolation|FaultViolation|GCViolation); SPECIALIZED_VIOLATION(ThrowsViolation|FaultViolation|GCViolation|TakesLockViolation|LoadsTypeViolation); SPECIALIZED_VIOLATION(ThrowsViolation|FaultViolation|GCViolation|ModeViolation); SPECIALIZED_VIOLATION(ThrowsViolation|FaultViolation|GCViolation|ModeViolation|FaultNotFatal); SPECIALIZED_VIOLATION(ThrowsViolation|FaultViolation|GCViolation|ModeViolation|FaultNotFatal|TakesLockViolation); SPECIALIZED_VIOLATION(GCViolation|FaultViolation); SPECIALIZED_VIOLATION(GCViolation|FaultNotFatal|ModeViolation); SPECIALIZED_VIOLATION(GCViolation|FaultNotFatal|TakesLockViolation); SPECIALIZED_VIOLATION(GCViolation|FaultNotFatal|TakesLockViolation|ModeViolation); SPECIALIZED_VIOLATION(GCViolation|ModeViolation); SPECIALIZED_VIOLATION(FaultViolation|FaultNotFatal); SPECIALIZED_VIOLATION(FaultNotFatal|TakesLockViolation); #undef SPECIALIZED_VIOLATION #undef SPECIALIZE_AUTO_CLEANUP_CONTRACT_VIOLATION_HOLDER #undef SPECIALIZE_CONTRACT_VIOLATION_HOLDER #endif // Trigger triggers the actual check failure. The trigger may provide a reason // to include in the failure message. void CHECK::Trigger(LPCSTR reason) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; const char *messageString = NULL; NewHolder<StackScratchBuffer> pScratch(NULL); NewHolder<StackSString> pMessage(NULL); EX_TRY { FAULT_NOT_FATAL(); pScratch = new StackScratchBuffer(); pMessage = new StackSString(); pMessage->AppendASCII(reason); pMessage->AppendASCII(": "); if (m_message != NULL) pMessage->AppendASCII((m_message != (LPCSTR)1) ? m_message : "<runtime check failure>"); #if _DEBUG pMessage->AppendASCII("FAILED: "); pMessage->AppendASCII(m_condition); #endif messageString = pMessage->GetANSI(*pScratch); } EX_CATCH { messageString = "<exception occurred while building failure description>"; } EX_END_CATCH(SwallowAllExceptions); #if _DEBUG DbgAssertDialog((char*)m_file, m_line, (char *)messageString); #else OutputDebugStringA(messageString); DebugBreak(); #endif } #ifdef _DEBUG // Setup records context info after a failure. void CHECK::Setup(LPCSTR message, LPCSTR condition, LPCSTR file, INT line) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_SUPPORTS_DAC_HOST_ONLY; // // It might be nice to collect all of the message here. But for now, we will just // retain the innermost one. // if (m_message == NULL) { m_message = message; m_condition = condition; m_file = file; m_line = line; } #ifdef _DEBUG else if (IsInAssert()) { EX_TRY { FAULT_NOT_FATAL(); // Try to build a stack of condition failures StackSString context; context.Printf("%s\n\t%s%s FAILED: %s\n\t\t%s, line: %d", m_condition, message && *message ? message : "", message && *message ? ": " : "", condition, file, line); m_condition = AllocateDynamicMessage(context); } EX_CATCH { // If anything goes wrong, we don't push extra context } EX_END_CATCH(SwallowAllExceptions) } #endif #if defined(_DEBUG_IMPL) if (IsInAssert() && IsDebuggerPresent()) { DebugBreak(); } #endif } LPCSTR CHECK::FormatMessage(LPCSTR messageFormat, ...) 
{ STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; LPCSTR result = NULL; // We never delete this allocated string. A dtor would conflict with places // we use this around SEH stuff. We could have some fancy stack-based allocator, // but that's too much work for now. In fact we believe that leaking is a reasonable // policy, since allocations will only happen on a failed assert, and a failed assert // will generally be fatal to the process. // The most common place for format strings will be in an assert; in // which case we don't care about leaking. // But to be safe, if we're not-inside an assert, then we'll just use // the format string literal to avoid allocated (and leaking) any memory. CHECK chk; if (!chk.IsInAssert()) result = messageFormat; else { // This path is only run in debug. TakesLockViolation suppresses // problems with SString below. CONTRACT_VIOLATION(FaultNotFatal|TakesLockViolation); EX_TRY { SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE; // Format it. va_list args; va_start( args, messageFormat); SString s; s.VPrintf(messageFormat, args); va_end(args); result = AllocateDynamicMessage(s); } EX_CATCH { // If anything goes wrong, just use the format string. result = messageFormat; } EX_END_CATCH(SwallowAllExceptions) } return result; } LPCSTR CHECK::AllocateDynamicMessage(const SString &s) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; // Make a copy of it. StackScratchBuffer buffer; const char * pMsg = s.GetANSI(buffer); // Must copy that into our own field. size_t len = strlen(pMsg) + 1; char * p = new char[len]; strcpy(p, pMsg); // But we'll keep counters of how much we're leaking for diagnostic purposes. s_cLeakedBytes += len; s_cNumFailures ++; // This should never fire. // Note use an ASSERTE (not a check) to avoid a recursive deadlock. _ASSERTE(s_cLeakedBytes < 10000 || !"Warning - check misuse - leaked over 10,000B due to unexpected usage pattern"); return p; } #endif
-1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
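A minimal sketch of the pattern this change targets (hypothetical names; a C-style stand-in for the managed loop the JIT sees). Previously the cloner only accepted an induction variable initialized from a constant or a simple local, so a loop like the one below was not cloned; with this change the pre-existing initialization is taken as-is, and a check of the variable against zero is added to the cloning conditions when needed (constant initializations keep the old behavior):

    int i;
    for (i = start + offset; i < a_len; i++)   // init is an arbitrary expression, not a constant or lone local
    {
        sum += a[i];                            // the cloned fast path can drop per-iteration checks
    }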
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/native/minipal/entrypoints.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef HAVE_MINIPAL_ENTRYPOINTS_H #define HAVE_MINIPAL_ENTRYPOINTS_H #include <stdint.h> #include <string.h> #include <minipal/utils.h> typedef struct { const char* name; const void* method; } Entry; // expands to: {"impl", (void*)impl}, #define DllImportEntry(impl) \ {#impl, (void*)impl}, static const void* minipal_resolve_dllimport(const Entry* resolutionTable, size_t tableLength, const char* name) { for (size_t i = 0; i < tableLength; i++) { if (strcmp(name, resolutionTable[i].name) == 0) { return resolutionTable[i].method; } } return NULL; } #endif // HAVE_MINIPAL_ENTRYPOINTS_H
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef HAVE_MINIPAL_ENTRYPOINTS_H #define HAVE_MINIPAL_ENTRYPOINTS_H #include <stdint.h> #include <string.h> #include <minipal/utils.h> typedef struct { const char* name; const void* method; } Entry; // expands to: {"impl", (void*)impl}, #define DllImportEntry(impl) \ {#impl, (void*)impl}, static const void* minipal_resolve_dllimport(const Entry* resolutionTable, size_t tableLength, const char* name) { for (size_t i = 0; i < tableLength; i++) { if (strcmp(name, resolutionTable[i].name) == 0) { return resolutionTable[i].method; } } return NULL; } #endif // HAVE_MINIPAL_ENTRYPOINTS_H
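A small usage sketch for the header above (illustrative only; the function and table names here are hypothetical): a native shim lists each export once with DllImportEntry and then resolves names against that table at runtime.

    static int32_t ExampleNative_GetAnswer(void)
    {
        return 42;
    }

    static const Entry s_exampleTable[] =
    {
        DllImportEntry(ExampleNative_GetAnswer)
    };

    // Returns the address of ExampleNative_GetAnswer for a matching name, or NULL otherwise.
    static const void* ResolveExample(const char* name)
    {
        return minipal_resolve_dllimport(s_exampleTable, sizeof(s_exampleTable) / sizeof(s_exampleTable[0]), name);
    }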
-1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/coreclr/vm/stubmgr.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "common.h" #include "stubmgr.h" #include "virtualcallstub.h" #include "dllimportcallback.h" #include "stubhelpers.h" #include "asmconstants.h" #ifdef FEATURE_COMINTEROP #include "olecontexthelpers.h" #endif #ifdef LOGGING const char *GetTType( TraceType tt) { LIMITED_METHOD_CONTRACT; switch( tt ) { case TRACE_ENTRY_STUB: return "TRACE_ENTRY_STUB"; case TRACE_STUB: return "TRACE_STUB"; case TRACE_UNMANAGED: return "TRACE_UNMANAGED"; case TRACE_MANAGED: return "TRACE_MANAGED"; case TRACE_FRAME_PUSH: return "TRACE_FRAME_PUSH"; case TRACE_MGR_PUSH: return "TRACE_MGR_PUSH"; case TRACE_OTHER: return "TRACE_OTHER"; case TRACE_UNJITTED_METHOD: return "TRACE_UNJITTED_METHOD"; } return "TRACE_REALLY_WACKED"; } void LogTraceDestination(const char * szHint, PCODE stubAddr, TraceDestination * pTrace) { LIMITED_METHOD_CONTRACT; if (pTrace->GetTraceType() == TRACE_UNJITTED_METHOD) { MethodDesc * md = pTrace->GetMethodDesc(); LOG((LF_CORDB, LL_INFO10000, "'%s' yields '%s' to method 0x%p for input 0x%p.\n", szHint, GetTType(pTrace->GetTraceType()), md, stubAddr)); } else { LOG((LF_CORDB, LL_INFO10000, "'%s' yields '%s' to address 0x%p for input 0x%p.\n", szHint, GetTType(pTrace->GetTraceType()), pTrace->GetAddress(), stubAddr)); } } #endif #ifdef _DEBUG // Get a string representation of this TraceDestination // Uses the supplied buffer to store the memory (or may return a string literal). const WCHAR * TraceDestination::DbgToString(SString & buffer) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; const WCHAR * pValue = W("unknown"); #ifndef DACCESS_COMPILE if (!StubManager::IsStubLoggingEnabled()) { return W("<unavailable while native-debugging>"); } // Now that we know we're not interop-debugging, we can safely call new. SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE; FAULT_NOT_FATAL(); EX_TRY { switch(this->type) { case TRACE_ENTRY_STUB: buffer.Printf("TRACE_ENTRY_STUB(addr=0x%p)", GetAddress()); pValue = buffer.GetUnicode(); break; case TRACE_STUB: buffer.Printf("TRACE_STUB(addr=0x%p)", GetAddress()); pValue = buffer.GetUnicode(); break; case TRACE_UNMANAGED: buffer.Printf("TRACE_UNMANAGED(addr=0x%p)", GetAddress()); pValue = buffer.GetUnicode(); break; case TRACE_MANAGED: buffer.Printf("TRACE_MANAGED(addr=0x%p)", GetAddress()); pValue = buffer.GetUnicode(); break; case TRACE_UNJITTED_METHOD: { MethodDesc * md = this->GetMethodDesc(); buffer.Printf("TRACE_UNJITTED_METHOD(md=0x%p, %s::%s)", md, md->m_pszDebugClassName, md->m_pszDebugMethodName); pValue = buffer.GetUnicode(); } break; case TRACE_FRAME_PUSH: buffer.Printf("TRACE_FRAME_PUSH(addr=0x%p)", GetAddress()); pValue = buffer.GetUnicode(); break; case TRACE_MGR_PUSH: buffer.Printf("TRACE_MGR_PUSH(addr=0x%p, sm=%s)", GetAddress(), this->GetStubManager()->DbgGetName()); pValue = buffer.GetUnicode(); break; case TRACE_OTHER: pValue = W("TRACE_OTHER"); break; } } EX_CATCH { pValue = W("(OOM while printing TD)"); } EX_END_CATCH(SwallowAllExceptions); #endif return pValue; } #endif void TraceDestination::InitForUnjittedMethod(MethodDesc * pDesc) { CONTRACTL { GC_NOTRIGGER; NOTHROW; MODE_ANY; PRECONDITION(CheckPointer(pDesc)); } CONTRACTL_END; _ASSERTE(pDesc->SanityCheck()); { // If this is a wrapper stub, then find the real method that it will go to and patch that. // This is more than just a convenience - converted wrapper MD to real MD is required for correct behavior. 
// Wrapper MDs look like unjitted MethodDescs. So when the debugger patches one, // it won't actually bind + apply the patch (it'll wait for the jit-complete instead). // But if the wrapper MD is for prejitted code, then we'll never get the Jit-complete. // Thus it'll miss the patch completely. if (pDesc->IsWrapperStub()) { MethodDesc * pNewDesc = NULL; FAULT_NOT_FATAL(); #ifndef DACCESS_COMPILE EX_TRY { pNewDesc = pDesc->GetExistingWrappedMethodDesc(); } EX_CATCH { // In case of an error, we'll just stick w/ the original method desc. } EX_END_CATCH(SwallowAllExceptions) #else // @todo - DAC needs this too, but the method is currently not DACized. // However, we don't throw here b/c the error may not be fatal. // DacNotImpl(); #endif if (pNewDesc != NULL) { pDesc = pNewDesc; LOG((LF_CORDB, LL_INFO10000, "TD::UnjittedMethod: wrapper md: %p --> %p\n", pDesc, pNewDesc)); } } } this->type = TRACE_UNJITTED_METHOD; this->pDesc = pDesc; this->stubManager = NULL; } // Initialize statics. #ifdef _DEBUG SString * StubManager::s_pDbgStubManagerLog = NULL; CrstStatic StubManager::s_DbgLogCrst; #endif SPTR_IMPL(StubManager, StubManager, g_pFirstManager); CrstStatic StubManager::s_StubManagerListCrst; //----------------------------------------------------------- // For perf reasons, the stub managers are now kept in a two // tier system: all stub managers but the VirtualStubManagers // are in the first tier. A VirtualStubManagerManager takes // care of all VirtualStubManagers, and is iterated last of // all. It does a smarter job of looking up the owning // manager for virtual stubs, checking the current and shared // appdomains before checking the remaining managers. // // Thus, this iterator will run the regular list until it // hits the end, then it will check the VSMM, then it will // end. //----------------------------------------------------------- class StubManagerIterator { public: StubManagerIterator(); ~StubManagerIterator(); void Reset(); BOOL Next(); PTR_StubManager Current(); protected: enum SMI_State { SMI_START, SMI_NORMAL, SMI_VIRTUALCALLSTUBMANAGER, SMI_END }; SMI_State m_state; PTR_StubManager m_pCurMgr; SimpleReadLockHolder m_lh; }; //----------------------------------------------------------- // Ctor //----------------------------------------------------------- StubManagerIterator::StubManagerIterator() { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; Reset(); } void StubManagerIterator::Reset() { LIMITED_METHOD_DAC_CONTRACT; m_pCurMgr = NULL; m_state = SMI_START; } //----------------------------------------------------------- // Ctor //----------------------------------------------------------- StubManagerIterator::~StubManagerIterator() { LIMITED_METHOD_DAC_CONTRACT; } //----------------------------------------------------------- // Move to the next element. 
Iterators are created at // start-1, so must call Next before using Current //----------------------------------------------------------- BOOL StubManagerIterator::Next() { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; #ifndef DACCESS_COMPILE CAN_TAKE_LOCK; // because of m_lh.Assign() #else CANNOT_TAKE_LOCK; #endif } CONTRACTL_END; SUPPORTS_DAC; do { if (m_state == SMI_START) { m_state = SMI_NORMAL; m_pCurMgr = StubManager::g_pFirstManager; } else if (m_state == SMI_NORMAL) { if (m_pCurMgr != NULL) { m_pCurMgr = m_pCurMgr->m_pNextManager; } else { // If we've reached the end of the regular list of stub managers, then we // set the VirtualCallStubManagerManager is the current item (effectively // forcing it to always be the last manager checked). m_state = SMI_VIRTUALCALLSTUBMANAGER; VirtualCallStubManagerManager *pVCSMMgr = VirtualCallStubManagerManager::GlobalManager(); m_pCurMgr = PTR_StubManager(pVCSMMgr); #ifndef DACCESS_COMPILE m_lh.Assign(&pVCSMMgr->m_RWLock); #endif } } else if (m_state == SMI_VIRTUALCALLSTUBMANAGER) { m_state = SMI_END; m_pCurMgr = NULL; #ifndef DACCESS_COMPILE m_lh.Clear(); #endif } } while (m_state != SMI_END && m_pCurMgr == NULL); CONSISTENCY_CHECK(m_state == SMI_END || m_pCurMgr != NULL); return (m_state != SMI_END); } //----------------------------------------------------------- // Get the current contents of the iterator //----------------------------------------------------------- PTR_StubManager StubManagerIterator::Current() { LIMITED_METHOD_DAC_CONTRACT; CONSISTENCY_CHECK(m_state != SMI_START); CONSISTENCY_CHECK(m_state != SMI_END); CONSISTENCY_CHECK(CheckPointer(m_pCurMgr)); return m_pCurMgr; } #ifndef DACCESS_COMPILE //----------------------------------------------------------- //----------------------------------------------------------- StubManager::StubManager() : m_pNextManager(NULL) { LIMITED_METHOD_CONTRACT; } //----------------------------------------------------------- //----------------------------------------------------------- StubManager::~StubManager() { CONTRACTL { NOTHROW; GC_NOTRIGGER; CAN_TAKE_LOCK; // StubManager::UnlinkStubManager uses a crst PRECONDITION(CheckPointer(this)); } CONTRACTL_END; UnlinkStubManager(this); } #endif // #ifndef DACCESS_COMPILE #ifdef _DEBUG_IMPL //----------------------------------------------------------- // Verify that the stub is owned by the given stub manager // and no other stub manager. If a stub is claimed by multiple managers, // then the wrong manager may claim ownership and improperly trace the stub. //----------------------------------------------------------- BOOL StubManager::IsSingleOwner(PCODE stubAddress, StubManager * pOwner) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; STATIC_CONTRACT_CAN_TAKE_LOCK; // courtesy StubManagerIterator // ensure this stubmanager owns it. _ASSERTE(pOwner != NULL); // ensure nobody else does. bool ownerFound = false; int count = 0; StubManagerIterator it; while (it.Next()) { // Callers would have iterated till pOwner. if (!ownerFound && it.Current() != pOwner) continue; if (it.Current() == pOwner) ownerFound = true; if (it.Current()->CheckIsStub_Worker(stubAddress)) { // If you hit this assert, you can tell what 2 stub managers are conflicting by inspecting their vtable. 
CONSISTENCY_CHECK_MSGF((it.Current() == pOwner), ("Stub at 0x%p is owner by multiple managers (0x%p, 0x%p)", (void*) stubAddress, pOwner, it.Current())); count++; } else { _ASSERTE(it.Current() != pOwner); } } _ASSERTE(ownerFound); // We expect pOwner to be the only one to own this stub. return (count == 1); } #endif //----------------------------------------------------------- //----------------------------------------------------------- BOOL StubManager::CheckIsStub_Worker(PCODE stubStartAddress) { CONTRACTL { NOTHROW; CAN_TAKE_LOCK; // CheckIsStub_Internal can enter SimpleRWLock GC_NOTRIGGER; } CONTRACTL_END; SUPPORTS_DAC; // @todo - consider having a single check for null right up front. // Though this may cover bugs where stub-managers don't handle bad addresses. // And someone could just as easily pass (0x01) as NULL. if (stubStartAddress == NULL) { return FALSE; } struct Param { BOOL fIsStub; StubManager *pThis; TADDR stubStartAddress; } param; param.fIsStub = FALSE; param.pThis = this; param.stubStartAddress = stubStartAddress; // This may be called from DAC, and DAC + non-DAC have very different // exception handling. #ifdef DACCESS_COMPILE PAL_TRY(Param *, pParam, &param) #else Param *pParam = &param; EX_TRY #endif { SUPPORTS_DAC; #ifndef DACCESS_COMPILE // Use CheckIsStub_Internal may AV. That's ok. AVInRuntimeImplOkayHolder AVOkay; #endif // Make a Polymorphic call to derived stub manager. // Try to see if this address is for a stub. If the address is // completely bogus, then this might fault, so we protect it // with SEH. pParam->fIsStub = pParam->pThis->CheckIsStub_Internal(pParam->stubStartAddress); } #ifdef DACCESS_COMPILE PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER) #else EX_CATCH #endif { LOG((LF_CORDB, LL_INFO10000, "D::GASTSI: exception indicated addr is bad.\n")); param.fIsStub = FALSE; } #ifdef DACCESS_COMPILE PAL_ENDTRY #else EX_END_CATCH(SwallowAllExceptions); #endif return param.fIsStub; } //----------------------------------------------------------- // stubAddress may be an invalid address. //----------------------------------------------------------- PTR_StubManager StubManager::FindStubManager(PCODE stubAddress) { CONTRACTL { NOTHROW; GC_NOTRIGGER; CAN_TAKE_LOCK; // courtesy StubManagerIterator } CONTRACTL_END; SUPPORTS_DAC; StubManagerIterator it; while (it.Next()) { if (it.Current()->CheckIsStub_Worker(stubAddress)) { _ASSERTE_IMPL(IsSingleOwner(stubAddress, it.Current())); return it.Current(); } } return NULL; } //----------------------------------------------------------- // Given an address, figure out a TraceDestination describing where // the instructions at that address will eventually transfer execution to. 
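// (Illustrative sketch, not from the original source: a typical caller resolves a stub address with
//      TraceDestination trace;
//      if (StubManager::TraceStub(ip, &trace))
//          StubManager::FollowTrace(&trace);    // chases TRACE_STUB hops until managed/unjitted/other
//  so the debugger ends up with the place execution will eventually reach.)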
//----------------------------------------------------------- BOOL StubManager::TraceStub(PCODE stubStartAddress, TraceDestination *trace) { WRAPPER_NO_CONTRACT; StubManagerIterator it; while (it.Next()) { StubManager * pCurrent = it.Current(); if (pCurrent->CheckIsStub_Worker(stubStartAddress)) { LOG((LF_CORDB, LL_INFO10000, "StubManager::TraceStub: addr 0x%p claimed by mgr " "0x%p.\n", stubStartAddress, pCurrent)); _ASSERTE_IMPL(IsSingleOwner(stubStartAddress, pCurrent)); BOOL fValid = pCurrent->DoTraceStub(stubStartAddress, trace); #ifdef _DEBUG if (IsStubLoggingEnabled()) { DbgWriteLog("Doing TraceStub for Address 0x%p, claimed by '%s' (0x%p)\n", stubStartAddress, pCurrent->DbgGetName(), pCurrent); if (fValid) { SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE; FAULT_NOT_FATAL(); SString buffer; DbgWriteLog(" td=%S\n", trace->DbgToString(buffer)); } else { DbgWriteLog(" stubmanager returned false. Does not expect to call managed code\n"); } } // logging #endif return fValid; } } if (ExecutionManager::IsManagedCode(stubStartAddress)) { trace->InitForManaged(stubStartAddress); #ifdef _DEBUG DbgWriteLog("Doing TraceStub for Address 0x%p is jitted code claimed by codemanager\n", stubStartAddress); #endif LOG((LF_CORDB, LL_INFO10000, "StubManager::TraceStub: addr 0x%p is managed code\n", stubStartAddress)); return TRUE; } LOG((LF_CORDB, LL_INFO10000, "StubManager::TraceStub: addr 0x%p unknown. TRACE_OTHER...\n", stubStartAddress)); #ifdef _DEBUG DbgWriteLog("Doing TraceStub for Address 0x%p is unknown!!!\n", stubStartAddress); #endif trace->InitForOther(stubStartAddress); return FALSE; } //----------------------------------------------------------- //----------------------------------------------------------- BOOL StubManager::FollowTrace(TraceDestination *trace) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; while (trace->GetTraceType() == TRACE_STUB) { LOG((LF_CORDB, LL_INFO10000, "StubManager::FollowTrace: TRACE_STUB for 0x%p\n", trace->GetAddress())); if (!TraceStub(trace->GetAddress(), trace)) { // // No stub manager claimed it - it must be an EE helper or something. // trace->InitForOther(trace->GetAddress()); } } LOG_TRACE_DESTINATION(trace, NULL, "StubManager::FollowTrace"); return trace->GetTraceType() != TRACE_OTHER; } #ifndef DACCESS_COMPILE //----------------------------------------------------------- //----------------------------------------------------------- void StubManager::AddStubManager(StubManager *mgr) { WRAPPER_NO_CONTRACT; CONSISTENCY_CHECK(CheckPointer(g_pFirstManager, NULL_OK)); CONSISTENCY_CHECK(CheckPointer(mgr)); GCX_COOP_NO_THREAD_BROKEN(); CrstHolder ch(&s_StubManagerListCrst); if (g_pFirstManager == NULL) { g_pFirstManager = mgr; } else { mgr->m_pNextManager = g_pFirstManager; g_pFirstManager = mgr; } LOG((LF_CORDB, LL_EVERYTHING, "StubManager::AddStubManager - 0x%p (vptr %p)\n", mgr, (*(PVOID*)mgr))); } //----------------------------------------------------------- // NOTE: The runtime MUST be suspended to use this in a // truly safe manner. 
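// In this file the unlink path is reached from the StubManager destructor above; that destructor's
// CAN_TAKE_LOCK contract exists because this function takes s_StubManagerListCrst.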
//----------------------------------------------------------- void StubManager::UnlinkStubManager(StubManager *mgr) { STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_CAN_TAKE_LOCK; CONSISTENCY_CHECK(CheckPointer(g_pFirstManager, NULL_OK)); CONSISTENCY_CHECK(CheckPointer(mgr)); CrstHolder ch(&s_StubManagerListCrst); StubManager **m = &g_pFirstManager; while (*m != NULL) { if (*m == mgr) { *m = (*m)->m_pNextManager; return; } m = &(*m)->m_pNextManager; } } #endif // #ifndef DACCESS_COMPILE #ifdef DACCESS_COMPILE //----------------------------------------------------------- //----------------------------------------------------------- void StubManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags) { SUPPORTS_DAC; // Report the global list head. DacEnumMemoryRegion(DacGlobalBase() + g_dacGlobals.StubManager__g_pFirstManager, sizeof(TADDR)); // // Report the list contents. // StubManagerIterator it; while (it.Next()) { it.Current()->DoEnumMemoryRegions(flags); } } //----------------------------------------------------------- //----------------------------------------------------------- void StubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags) { SUPPORTS_DAC; DAC_ENUM_VTHIS(); EMEM_OUT(("MEM: %p StubManager base\n", dac_cast<TADDR>(this))); } #endif // #ifdef DACCESS_COMPILE //----------------------------------------------------------- // Initialize the global stub manager service. //----------------------------------------------------------- void StubManager::InitializeStubManagers() { #if !defined(DACCESS_COMPILE) #if defined(_DEBUG) s_DbgLogCrst.Init(CrstDebuggerHeapLock, (CrstFlags)(CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD | CRST_TAKEN_DURING_SHUTDOWN)); #endif s_StubManagerListCrst.Init(CrstDebuggerHeapLock, (CrstFlags)(CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD | CRST_TAKEN_DURING_SHUTDOWN)); #endif // !DACCESS_COMPILE } //----------------------------------------------------------- // Terminate the global stub manager service. //----------------------------------------------------------- void StubManager::TerminateStubManagers() { #if !defined(DACCESS_COMPILE) #if defined(_DEBUG) DbgFinishLog(); s_DbgLogCrst.Destroy(); #endif s_StubManagerListCrst.Destroy(); #endif // !DACCESS_COMPILE } #ifdef _DEBUG //----------------------------------------------------------- // Should stub-manager logging be enabled? //----------------------------------------------------------- bool StubManager::IsStubLoggingEnabled() { // Our current logging impl uses SString, which uses new(), which can't be called // on the helper thread. (B/c it may deadlock. See SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE) // We avoid this by just not logging when native-debugging. if (IsDebuggerPresent()) { return false; } return true; } //----------------------------------------------------------- // Call to reset the log. This is used at the start of a new step-operation. // pThread is the managed thread doing the stepping. // It should either be the current thread or the helper thread. //----------------------------------------------------------- void StubManager::DbgBeginLog(TADDR addrCallInstruction, TADDR addrCallTarget) { #ifndef DACCESS_COMPILE CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; // We can't call new() if another thread holds the heap lock and is then suspended by // an interop-debugging. Since this is debug-only logging code, we'll just skip // it under those cases. 
if (!IsStubLoggingEnabled()) { return; } // Now that we know we're not interop-debugging, we can safely call new. SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE; FAULT_NOT_FATAL(); { CrstHolder ch(&s_DbgLogCrst); EX_TRY { if (s_pDbgStubManagerLog == NULL) { s_pDbgStubManagerLog = new SString(); } s_pDbgStubManagerLog->Clear(); } EX_CATCH { DbgFinishLog(); } EX_END_CATCH(SwallowAllExceptions); } DbgWriteLog("Beginning Step-in. IP after Call instruction is at 0x%p, call target is at 0x%p\n", addrCallInstruction, addrCallTarget); #endif } //----------------------------------------------------------- // Finish logging for this thread. // pThread is the managed thread doing the stepping. // It should either be the current thread or the helper thread. //----------------------------------------------------------- void StubManager::DbgFinishLog() { #ifndef DACCESS_COMPILE CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; CrstHolder ch(&s_DbgLogCrst); // Since this is just a tool for debugging, we don't care if we call new. SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE; FAULT_NOT_FATAL(); delete s_pDbgStubManagerLog; s_pDbgStubManagerLog = NULL; #endif } //----------------------------------------------------------- // Write an arbitrary string to the log. //----------------------------------------------------------- void StubManager::DbgWriteLog(const CHAR *format, ...) { #ifndef DACCESS_COMPILE CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; if (!IsStubLoggingEnabled()) { return; } // Since this is just a tool for debugging, we don't care if we call new. SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE; FAULT_NOT_FATAL(); CrstHolder ch(&s_DbgLogCrst); if (s_pDbgStubManagerLog == NULL) { return; } // Suppress asserts about lossy encoding conversion in SString::Printf CHECK chk; BOOL fEntered = chk.EnterAssert(); EX_TRY { va_list args; va_start(args, format); s_pDbgStubManagerLog->AppendVPrintf(format, args); va_end(args); } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions); if (fEntered) chk.LeaveAssert(); #endif } //----------------------------------------------------------- // Get the log as a string. //----------------------------------------------------------- void StubManager::DbgGetLog(SString * pStringOut) { #ifndef DACCESS_COMPILE CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; PRECONDITION(CheckPointer(pStringOut)); } CONTRACTL_END; if (!IsStubLoggingEnabled()) { return; } // Since this is just a tool for debugging, we don't care if we call new. SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE; FAULT_NOT_FATAL(); CrstHolder ch(&s_DbgLogCrst); if (s_pDbgStubManagerLog == NULL) { return; } EX_TRY { pStringOut->Set(*s_pDbgStubManagerLog); } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions); #endif } #endif // _DEBUG extern "C" void STDCALL ThePreStubPatchLabel(void); //----------------------------------------------------------- //----------------------------------------------------------- BOOL ThePreStubManager::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; PRECONDITION(stubStartAddress != NULL); PRECONDITION(CheckPointer(trace)); } CONTRACTL_END; // // We cannot tell where the stub will end up // until after the prestub worker has been run. 
// trace->InitForFramePush(GetEEFuncEntryPoint(ThePreStubPatchLabel)); return TRUE; } //----------------------------------------------------------- BOOL ThePreStubManager::CheckIsStub_Internal(PCODE stubStartAddress) { LIMITED_METHOD_DAC_CONTRACT; return stubStartAddress == GetPreStubEntryPoint(); } // ------------------------------------------------------- // Stub manager functions & globals // ------------------------------------------------------- SPTR_IMPL(PrecodeStubManager, PrecodeStubManager, g_pManager); #ifndef DACCESS_COMPILE /* static */ void PrecodeStubManager::Init() { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END g_pManager = new PrecodeStubManager(); StubManager::AddStubManager(g_pManager); } #endif // #ifndef DACCESS_COMPILE /* static */ BOOL PrecodeStubManager::CheckIsStub_Internal(PCODE stubStartAddress) { CONTRACTL { THROWS; // address may be bad, so we may AV. GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; return GetStubPrecodeRangeList()->IsInRange(stubStartAddress) || GetFixupPrecodeRangeList()->IsInRange(stubStartAddress); } BOOL PrecodeStubManager::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace) { CONTRACTL { INSTANCE_CHECK; NOTHROW; GC_NOTRIGGER; MODE_ANY; FORBID_FAULT; } CONTRACTL_END LOG((LF_CORDB, LL_EVERYTHING, "PrecodeStubManager::DoTraceStub called\n")); MethodDesc* pMD = NULL; #ifdef HAS_COMPACT_ENTRYPOINTS if (MethodDescChunk::IsCompactEntryPointAtAddress(stubStartAddress)) { pMD = MethodDescChunk::GetMethodDescFromCompactEntryPoint(stubStartAddress); } else #endif // HAS_COMPACT_ENTRYPOINTS { // When the target slot points to the fixup part of the fixup precode, we need to compensate // for that to get the actual stub address Precode* pPrecode = Precode::GetPrecodeFromEntryPoint(stubStartAddress - FixupPrecode::FixupCodeOffset, TRUE /* speculative */); if ((pPrecode == NULL) || (pPrecode->GetType() != PRECODE_FIXUP)) { pPrecode = Precode::GetPrecodeFromEntryPoint(stubStartAddress); } PREFIX_ASSUME(pPrecode != NULL); switch (pPrecode->GetType()) { case PRECODE_STUB: break; #ifdef HAS_NDIRECT_IMPORT_PRECODE case PRECODE_NDIRECT_IMPORT: #ifndef DACCESS_COMPILE trace->InitForUnmanaged(GetEEFuncEntryPoint(NDirectImportThunk)); #else trace->InitForOther(NULL); #endif LOG_TRACE_DESTINATION(trace, stubStartAddress, "PrecodeStubManager::DoTraceStub - NDirect import"); return TRUE; #endif // HAS_NDIRECT_IMPORT_PRECODE #ifdef HAS_FIXUP_PRECODE case PRECODE_FIXUP: break; #endif // HAS_FIXUP_PRECODE #ifdef HAS_THISPTR_RETBUF_PRECODE case PRECODE_THISPTR_RETBUF: break; #endif // HAS_THISPTR_RETBUF_PRECODE default: _ASSERTE_IMPL(!"DoTraceStub: Unexpected precode type"); break; } PCODE target = pPrecode->GetTarget(); // check if the method has been jitted if (!pPrecode->IsPointingToPrestub(target)) { trace->InitForStub(target); LOG_TRACE_DESTINATION(trace, stubStartAddress, "PrecodeStubManager::DoTraceStub - code"); return TRUE; } pMD = pPrecode->GetMethodDesc(); } PREFIX_ASSUME(pMD != NULL); // If the method is not IL, then we patch the prestub because no one will ever change the call here at the // MethodDesc. If, however, this is an IL method, then we are at risk to have another thread backpatch the call // here, so we'd miss if we patched the prestub. Therefore, we go right to the IL method and patch IL offset 0 // by using TRACE_UNJITTED_METHOD. 
if (!pMD->IsIL() && !pMD->IsILStub()) { trace->InitForStub(GetPreStubEntryPoint()); } else { trace->InitForUnjittedMethod(pMD); } LOG_TRACE_DESTINATION(trace, stubStartAddress, "PrecodeStubManager::DoTraceStub - prestub"); return TRUE; } #ifndef DACCESS_COMPILE BOOL PrecodeStubManager::TraceManager(Thread *thread, TraceDestination *trace, T_CONTEXT *pContext, BYTE **pRetAddr) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; PRECONDITION(CheckPointer(thread, NULL_OK)); PRECONDITION(CheckPointer(trace)); PRECONDITION(CheckPointer(pContext)); PRECONDITION(CheckPointer(pRetAddr)); } CONTRACTL_END; _ASSERTE(!"Unexpected call to PrecodeStubManager::TraceManager"); return FALSE; } #endif // ------------------------------------------------------- // StubLinkStubManager // ------------------------------------------------------- SPTR_IMPL(StubLinkStubManager, StubLinkStubManager, g_pManager); #ifndef DACCESS_COMPILE /* static */ void StubLinkStubManager::Init() { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END g_pManager = new StubLinkStubManager(); StubManager::AddStubManager(g_pManager); } #endif // #ifndef DACCESS_COMPILE BOOL StubLinkStubManager::CheckIsStub_Internal(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; return GetRangeList()->IsInRange(stubStartAddress); } BOOL StubLinkStubManager::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace) { CONTRACTL { INSTANCE_CHECK; NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END LOG((LF_CORDB, LL_INFO10000, "StubLinkStubManager::DoTraceStub: stubStartAddress=0x%p\n", stubStartAddress)); Stub *stub = Stub::RecoverStub(stubStartAddress); LOG((LF_CORDB, LL_INFO10000, "StubLinkStubManager::DoTraceStub: stub=0x%p\n", stub)); // // If this is an intercept stub, we may be able to step // into the intercepted stub. // // <TODO>!!! Note that this case should not be necessary, it's just // here until I get all of the patch offsets & frame patch // methods in place.</TODO> // TADDR pRealAddr = 0; if (stub->IsMulticastDelegate()) { // If it's a MC delegate, then we want to set a BP & do a context-ful // manager push, so that we can figure out if this call will be to a // single multicast delegate or a multi multicast delegate trace->InitForManagerPush(stubStartAddress, this); LOG_TRACE_DESTINATION(trace, stubStartAddress, "StubLinkStubManager(MCDel)::DoTraceStub"); return TRUE; } else if (stub->IsInstantiatingStub()) { trace->InitForManagerPush(stubStartAddress, this); LOG_TRACE_DESTINATION(trace, stubStartAddress, "StubLinkStubManager(InstantiatingMethod)::DoTraceStub"); return TRUE; } else if (stub->GetPatchOffset() != 0) { // The patch offset is currently only non-zero in x86 non-IL delegate scenarios. 
trace->InitForFramePush((PCODE)stub->GetPatchAddress()); LOG_TRACE_DESTINATION(trace, stubStartAddress, "StubLinkStubManager::DoTraceStub"); return TRUE; } LOG((LF_CORDB, LL_INFO10000, "StubLinkStubManager::DoTraceStub: patch offset is 0!\n")); return FALSE; } #ifndef DACCESS_COMPILE static PCODE GetStubTarget(PTR_MethodDesc pTargetMD) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; PRECONDITION(pTargetMD != NULL); } CONTRACTL_END; NativeCodeVersion targetCode; #ifdef FEATURE_CODE_VERSIONING CodeVersionManager::LockHolder codeVersioningLockHolder; ILCodeVersion ilcode = pTargetMD->GetCodeVersionManager()->GetActiveILCodeVersion(pTargetMD); targetCode = ilcode.GetActiveNativeCodeVersion(pTargetMD); #else targetCode = NativeCodeVersion(pTargetMD); #endif if (targetCode.IsNull() || targetCode.GetNativeCode() == NULL) return NULL; return targetCode.GetNativeCode(); } BOOL StubLinkStubManager::TraceManager(Thread *thread, TraceDestination *trace, T_CONTEXT *pContext, BYTE **pRetAddr) { CONTRACTL { INSTANCE_CHECK; THROWS; GC_TRIGGERS; MODE_ANY; INJECT_FAULT(return FALSE;); } CONTRACTL_END LPVOID pc = (LPVOID)GetIP(pContext); *pRetAddr = (BYTE *)StubManagerHelpers::GetReturnAddress(pContext); LOG((LF_CORDB,LL_INFO10000, "SLSM:TM 0x%p, retAddr is 0x%p\n", pc, (*pRetAddr))); Stub *stub = Stub::RecoverStub((PCODE)pc); if (stub->IsInstantiatingStub()) { LOG((LF_CORDB,LL_INFO10000, "SLSM:TM Instantiating method stub\n")); PTR_MethodDesc pMD = stub->GetInstantiatedMethodDesc(); _ASSERTE(pMD != NULL); PCODE target = GetStubTarget(pMD); if (target == NULL) { LOG((LF_CORDB,LL_INFO10000, "SLSM:TM Unable to determine stub target, fd 0x%p\n", pMD)); trace->InitForUnjittedMethod(pMD); return TRUE; } trace->InitForManaged(target); return TRUE; } else if (stub->IsMulticastDelegate()) { LOG((LF_CORDB,LL_INFO10000, "SLSM:TM MultiCastDelegate\n")); BYTE *pbDel = (BYTE *)StubManagerHelpers::GetThisPtr(pContext); return DelegateInvokeStubManager::TraceDelegateObject(pbDel, trace); } // Runtime bug if we get here. Did we make a change in StubLinkStubManager::DoTraceStub() that // dispatched new stubs to TraceManager without writing the code to handle them? _ASSERTE(!"SLSM:TM wasn't expected to handle any other stub types"); return FALSE; } #endif // #ifndef DACCESS_COMPILE // ------------------------------------------------------- // Stub manager for thunks. // // Note, the only reason we have this stub manager is so that we can recgonize UMEntryThunks for IsTransitionStub. If it // turns out that having a full-blown stub manager for these things causes problems else where, then we can just attach // a range list to the thunk heap and have IsTransitionStub check that after checking with the main stub manager. // ------------------------------------------------------- SPTR_IMPL(ThunkHeapStubManager, ThunkHeapStubManager, g_pManager); #ifndef DACCESS_COMPILE /* static */ void ThunkHeapStubManager::Init() { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; INJECT_FAULT(COMPlusThrowOM()); } CONTRACTL_END; g_pManager = new ThunkHeapStubManager(); StubManager::AddStubManager(g_pManager); } #endif // !DACCESS_COMPILE BOOL ThunkHeapStubManager::CheckIsStub_Internal(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; // Its a stub if its in our heaps range. return GetRangeList()->IsInRange(stubStartAddress); } BOOL ThunkHeapStubManager::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace) { LIMITED_METHOD_CONTRACT; // We never trace through these stubs when stepping through managed code. 
The only reason we have this stub manager // is so that IsTransitionStub can recgonize UMEntryThunks. return FALSE; } // ------------------------------------------------------- // JumpStub stubs // // Stub manager for jump stubs created by ExecutionManager::jumpStub() // These are currently used only on the 64-bit targets IA64 and AMD64 // // ------------------------------------------------------- SPTR_IMPL(JumpStubStubManager, JumpStubStubManager, g_pManager); #ifndef DACCESS_COMPILE /* static */ void JumpStubStubManager::Init() { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END g_pManager = new JumpStubStubManager(); StubManager::AddStubManager(g_pManager); } #endif // #ifndef DACCESS_COMPILE BOOL JumpStubStubManager::CheckIsStub_Internal(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; // Forwarded to from RangeSectionStubManager return FALSE; } BOOL JumpStubStubManager::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace) { LIMITED_METHOD_CONTRACT; PCODE jumpTarget = decodeBackToBackJump(stubStartAddress); trace->InitForStub(jumpTarget); LOG_TRACE_DESTINATION(trace, stubStartAddress, "JumpStubStubManager::DoTraceStub"); return TRUE; } // // Stub manager for code sections. It forwards the query to the more appropriate // stub manager, or handles the query itself. // SPTR_IMPL(RangeSectionStubManager, RangeSectionStubManager, g_pManager); #ifndef DACCESS_COMPILE /* static */ void RangeSectionStubManager::Init() { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END g_pManager = new RangeSectionStubManager(); StubManager::AddStubManager(g_pManager); } #endif // #ifndef DACCESS_COMPILE BOOL RangeSectionStubManager::CheckIsStub_Internal(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; switch (GetStubKind(stubStartAddress)) { case STUB_CODE_BLOCK_PRECODE: case STUB_CODE_BLOCK_JUMPSTUB: case STUB_CODE_BLOCK_STUBLINK: case STUB_CODE_BLOCK_VIRTUAL_METHOD_THUNK: case STUB_CODE_BLOCK_EXTERNAL_METHOD_THUNK: case STUB_CODE_BLOCK_METHOD_CALL_THUNK: return TRUE; default: break; } return FALSE; } BOOL RangeSectionStubManager::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace) { CONTRACTL { INSTANCE_CHECK; NOTHROW; GC_NOTRIGGER; MODE_ANY; FORBID_FAULT; } CONTRACTL_END switch (GetStubKind(stubStartAddress)) { case STUB_CODE_BLOCK_PRECODE: return PrecodeStubManager::g_pManager->DoTraceStub(stubStartAddress, trace); case STUB_CODE_BLOCK_JUMPSTUB: return JumpStubStubManager::g_pManager->DoTraceStub(stubStartAddress, trace); case STUB_CODE_BLOCK_STUBLINK: return StubLinkStubManager::g_pManager->DoTraceStub(stubStartAddress, trace); case STUB_CODE_BLOCK_METHOD_CALL_THUNK: #ifdef DACCESS_COMPILE DacNotImpl(); #else trace->InitForManagerPush(GetEEFuncEntryPoint(ExternalMethodFixupPatchLabel), this); #endif return TRUE; default: break; } return FALSE; } #ifndef DACCESS_COMPILE BOOL RangeSectionStubManager::TraceManager(Thread *thread, TraceDestination *trace, CONTEXT *pContext, BYTE **pRetAddr) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; _ASSERTE(GetIP(pContext) == GetEEFuncEntryPoint(ExternalMethodFixupPatchLabel)); *pRetAddr = (BYTE *)StubManagerHelpers::GetReturnAddress(pContext); PCODE target = StubManagerHelpers::GetTailCallTarget(pContext); trace->InitForStub(target); return TRUE; } #endif #ifdef DACCESS_COMPILE LPCWSTR RangeSectionStubManager::GetStubManagerName(PCODE addr) { WRAPPER_NO_CONTRACT; switch (GetStubKind(addr)) { case STUB_CODE_BLOCK_PRECODE: return W("MethodDescPrestub"); case STUB_CODE_BLOCK_JUMPSTUB: return 
W("JumpStub"); case STUB_CODE_BLOCK_STUBLINK: return W("StubLinkStub"); case STUB_CODE_BLOCK_VIRTUAL_METHOD_THUNK: return W("VirtualMethodThunk"); case STUB_CODE_BLOCK_EXTERNAL_METHOD_THUNK: return W("ExternalMethodThunk"); case STUB_CODE_BLOCK_METHOD_CALL_THUNK: return W("MethodCallThunk"); default: break; } return W("UnknownRangeSectionStub"); } #endif // DACCESS_COMPILE StubCodeBlockKind RangeSectionStubManager::GetStubKind(PCODE stubStartAddress) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; RangeSection * pRS = ExecutionManager::FindCodeRange(stubStartAddress, ExecutionManager::ScanReaderLock); if (pRS == NULL) return STUB_CODE_BLOCK_UNKNOWN; return pRS->pjit->GetStubCodeBlockKind(pRS, stubStartAddress); } // // This is the stub manager for IL stubs. // #ifndef DACCESS_COMPILE /* static */ void ILStubManager::Init() { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END StubManager::AddStubManager(new ILStubManager()); } #endif // #ifndef DACCESS_COMPILE BOOL ILStubManager::CheckIsStub_Internal(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; MethodDesc *pMD = ExecutionManager::GetCodeMethodDesc(stubStartAddress); return (pMD != NULL) && pMD->IsILStub(); } BOOL ILStubManager::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace) { LIMITED_METHOD_CONTRACT; LOG((LF_CORDB, LL_EVERYTHING, "ILStubManager::DoTraceStub called\n")); #ifndef DACCESS_COMPILE PCODE traceDestination = NULL; #ifdef FEATURE_MULTICASTSTUB_AS_IL MethodDesc* pStubMD = ExecutionManager::GetCodeMethodDesc(stubStartAddress); if (pStubMD != NULL && pStubMD->AsDynamicMethodDesc()->IsMulticastStub()) { traceDestination = GetEEFuncEntryPoint(StubHelpers::MulticastDebuggerTraceHelper); } else #endif // FEATURE_MULTICASTSTUB_AS_IL { // This call is going out to unmanaged code, either through pinvoke or COM interop. 
traceDestination = stubStartAddress; } trace->InitForManagerPush(traceDestination, this); LOG_TRACE_DESTINATION(trace, traceDestination, "ILStubManager::DoTraceStub"); return TRUE; #else // !DACCESS_COMPILE trace->InitForOther(NULL); return FALSE; #endif // !DACCESS_COMPILE } #ifndef DACCESS_COMPILE #ifdef FEATURE_COMINTEROP static PCODE GetCOMTarget(Object *pThis, ComPlusCallInfo *pComPlusCallInfo) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; // calculate the target interface pointer SafeComHolder<IUnknown> pUnk; OBJECTREF oref = ObjectToOBJECTREF(pThis); GCPROTECT_BEGIN(oref); pUnk = ComObject::GetComIPFromRCWThrowing(&oref, pComPlusCallInfo->m_pInterfaceMT); GCPROTECT_END(); LPVOID *lpVtbl = *(LPVOID **)(IUnknown *)pUnk; PCODE target = (PCODE)lpVtbl[pComPlusCallInfo->m_cachedComSlot]; return target; } #endif // FEATURE_COMINTEROP BOOL ILStubManager::TraceManager(Thread *thread, TraceDestination *trace, T_CONTEXT *pContext, BYTE **pRetAddr) { // See code:ILStubCache.CreateNewMethodDesc for the code that sets flags on stub MDs PCODE stubIP = GetIP(pContext); *pRetAddr = (BYTE *)StubManagerHelpers::GetReturnAddress(pContext); #ifdef FEATURE_MULTICASTSTUB_AS_IL if (stubIP == GetEEFuncEntryPoint(StubHelpers::MulticastDebuggerTraceHelper)) { stubIP = (PCODE)*pRetAddr; *pRetAddr = (BYTE*)StubManagerHelpers::GetRetAddrFromMulticastILStubFrame(pContext); } #endif DynamicMethodDesc *pStubMD = Entry2MethodDesc(stubIP, NULL)->AsDynamicMethodDesc(); TADDR arg = StubManagerHelpers::GetHiddenArg(pContext); Object * pThis = StubManagerHelpers::GetThisPtr(pContext); LOG((LF_CORDB, LL_INFO1000, "ILSM::TraceManager: Enter: StubMD 0x%p, HiddenArg 0x%p, ThisPtr 0x%p\n", pStubMD, arg, pThis)); // See code:ILStubCache.CreateNewMethodDesc for the code that sets flags on stub MDs PCODE target = NULL; #ifdef FEATURE_MULTICASTSTUB_AS_IL if (pStubMD->IsMulticastStub()) { _ASSERTE(GetIP(pContext) == GetEEFuncEntryPoint(StubHelpers::MulticastDebuggerTraceHelper)); int delegateCount = (int)StubManagerHelpers::GetSecondArg(pContext); int totalDelegateCount = (int)*(size_t*)((BYTE*)pThis + DelegateObject::GetOffsetOfInvocationCount()); if (delegateCount == totalDelegateCount) { LOG((LF_CORDB, LL_INFO1000, "ILSM::TraceManager: Fired all delegates\n")); return FALSE; } else { // We're going to execute stub delegateCount next, so go and grab it. 
BYTE *pbDelInvocationList = *(BYTE **)((BYTE*)pThis + DelegateObject::GetOffsetOfInvocationList()); BYTE* pbDel = *(BYTE**)( ((ArrayBase *)pbDelInvocationList)->GetDataPtr() + ((ArrayBase *)pbDelInvocationList)->GetComponentSize()*delegateCount); _ASSERTE(pbDel); return DelegateInvokeStubManager::TraceDelegateObject(pbDel, trace); } } else #endif // FEATURE_MULTICASTSTUB_AS_IL if (pStubMD->IsReverseStub()) { if (pStubMD->IsStatic()) { // This is reverse P/Invoke stub, the argument is UMEntryThunk UMEntryThunk *pEntryThunk = (UMEntryThunk *)arg; target = pEntryThunk->GetManagedTarget(); LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: Reverse P/Invoke case 0x%p\n", target)); } else { // This is COM-to-CLR stub, the argument is the target target = (PCODE)arg; LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: COM-to-CLR case 0x%p\n", target)); } trace->InitForManaged(target); } else if (pStubMD->HasFlags(DynamicMethodDesc::FlagIsDelegate)) { // This is forward delegate P/Invoke stub, the argument is undefined DelegateObject *pDel = (DelegateObject *)pThis; target = pDel->GetMethodPtrAux(); LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: Forward delegate P/Invoke case 0x%p\n", target)); trace->InitForUnmanaged(target); } else if (pStubMD->HasFlags(DynamicMethodDesc::FlagIsCALLI)) { // This is unmanaged CALLI stub, the argument is the target target = (PCODE)arg; // The value is mangled on 64-bit #ifdef TARGET_AMD64 target = target >> 1; // call target is encoded as (addr << 1) | 1 #endif // TARGET_AMD64 LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: Unmanaged CALLI case 0x%p\n", target)); trace->InitForUnmanaged(target); } else if (pStubMD->IsStepThroughStub()) { MethodDesc* pTargetMD = pStubMD->GetILStubResolver()->GetStubTargetMethodDesc(); if (pTargetMD == NULL) { LOG((LF_CORDB, LL_INFO1000, "ILSM::TraceManager: Stub has no target to step through to\n")); return FALSE; } LOG((LF_CORDB, LL_INFO1000, "ILSM::TraceManager: Step through to target - 0x%p\n", pTargetMD)); target = GetStubTarget(pTargetMD); if (target == NULL) return FALSE; trace->InitForManaged(target); } else if (pStubMD->HasMDContextArg()) { LOG((LF_CORDB, LL_INFO1000, "ILSM::TraceManager: Hidden argument is MethodDesc\n")); // This is either direct forward P/Invoke or a CLR-to-COM call, the argument is MD MethodDesc *pMD = (MethodDesc *)arg; if (pMD->IsNDirect()) { target = (PCODE)((NDirectMethodDesc *)pMD)->GetNativeNDirectTarget(); LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: Forward P/Invoke case 0x%p\n", target)); trace->InitForUnmanaged(target); } #ifdef FEATURE_COMINTEROP else { LOG((LF_CORDB, LL_INFO1000, "ILSM::TraceManager: Stub is CLR-to-COM\n")); _ASSERTE(pMD->IsComPlusCall()); ComPlusCallMethodDesc *pCMD = (ComPlusCallMethodDesc *)pMD; _ASSERTE(!pCMD->IsStatic() && !pCMD->IsCtor() && "Static methods and constructors are not supported for built-in classic COM"); if (pThis != NULL) { target = GetCOMTarget(pThis, pCMD->m_pComPlusCallInfo); LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: CLR-to-COM case 0x%p\n", target)); trace->InitForUnmanaged(target); } } #endif // FEATURE_COMINTEROP } else { LOG((LF_CORDB, LL_INFO1000, "ILSM::TraceManager: No known target, IL Stub is a leaf\n")); // There's no "target" so we have nowhere to tell the debugger to move the breakpoint. return FALSE; } return TRUE; } #endif //!DACCESS_COMPILE // This is used to recognize GenericComPlusCallStub, VarargPInvokeStub, and GenericPInvokeCalliHelper. 
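// Unlike the range-list based managers above, this manager recognizes its stubs by comparing the address
// directly against these well-known entry points (see CheckIsStub_Internal below), since each is a single helper.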
#ifndef DACCESS_COMPILE /* static */ void InteropDispatchStubManager::Init() { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END StubManager::AddStubManager(new InteropDispatchStubManager()); } #endif // #ifndef DACCESS_COMPILE PCODE TheGenericComplusCallStub(); // clrtocom.cpp #ifndef DACCESS_COMPILE static BOOL IsVarargPInvokeStub(PCODE stubStartAddress) { LIMITED_METHOD_CONTRACT; if (stubStartAddress == GetEEFuncEntryPoint(VarargPInvokeStub)) return TRUE; #if !defined(TARGET_X86) && !defined(TARGET_ARM64) if (stubStartAddress == GetEEFuncEntryPoint(VarargPInvokeStub_RetBuffArg)) return TRUE; #endif return FALSE; } #endif // #ifndef DACCESS_COMPILE BOOL InteropDispatchStubManager::CheckIsStub_Internal(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; //@dbgtodo dharvey implement DAC suport #ifndef DACCESS_COMPILE #ifdef FEATURE_COMINTEROP if (stubStartAddress == GetEEFuncEntryPoint(GenericComPlusCallStub)) { return true; } #endif // FEATURE_COMINTEROP if (IsVarargPInvokeStub(stubStartAddress)) { return true; } if (stubStartAddress == GetEEFuncEntryPoint(GenericPInvokeCalliHelper)) { return true; } #endif // !DACCESS_COMPILE return false; } BOOL InteropDispatchStubManager::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace) { LIMITED_METHOD_CONTRACT; LOG((LF_CORDB, LL_EVERYTHING, "InteropDispatchStubManager::DoTraceStub called\n")); #ifndef DACCESS_COMPILE _ASSERTE(CheckIsStub_Internal(stubStartAddress)); trace->InitForManagerPush(stubStartAddress, this); LOG_TRACE_DESTINATION(trace, stubStartAddress, "InteropDispatchStubManager::DoTraceStub"); return TRUE; #else // !DACCESS_COMPILE trace->InitForOther(NULL); return FALSE; #endif // !DACCESS_COMPILE } #ifndef DACCESS_COMPILE BOOL InteropDispatchStubManager::TraceManager(Thread *thread, TraceDestination *trace, T_CONTEXT *pContext, BYTE **pRetAddr) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; *pRetAddr = (BYTE *)StubManagerHelpers::GetReturnAddress(pContext); TADDR arg = StubManagerHelpers::GetHiddenArg(pContext); // IL stub may not exist at this point so we init directly for the target (TODO?) 
if (IsVarargPInvokeStub(GetIP(pContext))) { NDirectMethodDesc *pNMD = (NDirectMethodDesc *)arg; _ASSERTE(pNMD->IsNDirect()); PCODE target = (PCODE)pNMD->GetNDirectTarget(); LOG((LF_CORDB, LL_INFO10000, "IDSM::TraceManager: Vararg P/Invoke case 0x%p\n", target)); trace->InitForUnmanaged(target); } else if (GetIP(pContext) == GetEEFuncEntryPoint(GenericPInvokeCalliHelper)) { PCODE target = (PCODE)arg; LOG((LF_CORDB, LL_INFO10000, "IDSM::TraceManager: Unmanaged CALLI case 0x%p\n", target)); trace->InitForUnmanaged(target); } #ifdef FEATURE_COMINTEROP else { ComPlusCallMethodDesc *pCMD = (ComPlusCallMethodDesc *)arg; _ASSERTE(pCMD->IsComPlusCall()); Object * pThis = StubManagerHelpers::GetThisPtr(pContext); { if (!pCMD->m_pComPlusCallInfo->m_pInterfaceMT->IsComEventItfType() && (pCMD->m_pComPlusCallInfo->m_pILStub != NULL)) { // Early-bound CLR->COM call - continue in the IL stub trace->InitForStub(pCMD->m_pComPlusCallInfo->m_pILStub); } else { // Late-bound CLR->COM call - continue in target's IDispatch::Invoke OBJECTREF oref = ObjectToOBJECTREF(pThis); GCPROTECT_BEGIN(oref); MethodTable *pItfMT = pCMD->m_pComPlusCallInfo->m_pInterfaceMT; _ASSERTE(pItfMT->GetComInterfaceType() == ifDispatch); SafeComHolder<IUnknown> pUnk = ComObject::GetComIPFromRCWThrowing(&oref, pItfMT); LPVOID *lpVtbl = *(LPVOID **)(IUnknown *)pUnk; PCODE target = (PCODE)lpVtbl[6]; // DISPATCH_INVOKE_SLOT; LOG((LF_CORDB, LL_INFO10000, "IDSM::TraceManager: CLR-to-COM late-bound case 0x%p\n", target)); trace->InitForUnmanaged(target); GCPROTECT_END(); } } } #endif // FEATURE_COMINTEROP return TRUE; } #endif //!DACCESS_COMPILE // // Since we don't generate delegate invoke stubs at runtime on IA64, we // can't use the StubLinkStubManager for these stubs. Instead, we create // an additional DelegateInvokeStubManager instead. // SPTR_IMPL(DelegateInvokeStubManager, DelegateInvokeStubManager, g_pManager); #ifndef DACCESS_COMPILE // static void DelegateInvokeStubManager::Init() { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END g_pManager = new DelegateInvokeStubManager(); StubManager::AddStubManager(g_pManager); } BOOL DelegateInvokeStubManager::AddStub(Stub* pStub) { WRAPPER_NO_CONTRACT; PCODE start = pStub->GetEntryPoint(); // We don't really care about the size here. We only stop in these stubs at the first instruction, // so we'll never be asked to claim an address in the middle of a stub. return GetRangeList()->AddRange((BYTE *)start, (BYTE *)start + 1, (LPVOID)start); } void DelegateInvokeStubManager::RemoveStub(Stub* pStub) { WRAPPER_NO_CONTRACT; PCODE start = pStub->GetEntryPoint(); // We don't really care about the size here. We only stop in these stubs at the first instruction, // so we'll never be asked to claim an address in the middle of a stub. 
GetRangeList()->RemoveRanges((LPVOID)start); } #endif BOOL DelegateInvokeStubManager::CheckIsStub_Internal(PCODE stubStartAddress) { LIMITED_METHOD_DAC_CONTRACT; bool fIsStub = false; #ifndef DACCESS_COMPILE #ifndef TARGET_X86 fIsStub = fIsStub || (stubStartAddress == GetEEFuncEntryPoint(SinglecastDelegateInvokeStub)); #endif #endif // !DACCESS_COMPILE fIsStub = fIsStub || GetRangeList()->IsInRange(stubStartAddress); return fIsStub; } BOOL DelegateInvokeStubManager::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace) { LIMITED_METHOD_CONTRACT; LOG((LF_CORDB, LL_EVERYTHING, "DelegateInvokeStubManager::DoTraceStub called\n")); _ASSERTE(CheckIsStub_Internal(stubStartAddress)); // If it's a MC delegate, then we want to set a BP & do a context-ful // manager push, so that we can figure out if this call will be to a // single multicast delegate or a multi multicast delegate trace->InitForManagerPush(stubStartAddress, this); LOG_TRACE_DESTINATION(trace, stubStartAddress, "DelegateInvokeStubManager::DoTraceStub"); return TRUE; } #if !defined(DACCESS_COMPILE) BOOL DelegateInvokeStubManager::TraceManager(Thread *thread, TraceDestination *trace, T_CONTEXT *pContext, BYTE **pRetAddr) { CONTRACTL { MODE_COOPERATIVE; } CONTRACTL_END; PCODE destAddr; PCODE pc; pc = ::GetIP(pContext); BYTE* pThis; pThis = NULL; // Retrieve the this pointer from the context. #if defined(TARGET_X86) (*pRetAddr) = *(BYTE **)(size_t)(pContext->Esp); pThis = (BYTE*)(size_t)(pContext->Ecx); destAddr = *(PCODE*)(pThis + DelegateObject::GetOffsetOfMethodPtrAux()); #elif defined(TARGET_AMD64) // <TODO> // We need to check whether the following is the correct return address. // </TODO> (*pRetAddr) = *(BYTE **)(size_t)(pContext->Rsp); LOG((LF_CORDB, LL_INFO10000, "DISM:TM at 0x%p, retAddr is 0x%p\n", pc, (*pRetAddr))); DELEGATEREF orDelegate; if (GetEEFuncEntryPoint(SinglecastDelegateInvokeStub) == pc) { LOG((LF_CORDB, LL_INFO10000, "DISM::TraceManager: isSingle\n")); orDelegate = (DELEGATEREF)ObjectToOBJECTREF(StubManagerHelpers::GetThisPtr(pContext)); // _methodPtr is where we are going to next. However, in ngen cases, we may have a shuffle thunk // burned into the ngen image, in which case the shuffle thunk is not added to the range list of // the DelegateInvokeStubManager. So we use _methodPtrAux as a fallback. destAddr = orDelegate->GetMethodPtr(); if (StubManager::TraceStub(destAddr, trace)) { LOG((LF_CORDB,LL_INFO10000, "DISM::TM: ppbDest: 0x%p\n", destAddr)); LOG((LF_CORDB,LL_INFO10000, "DISM::TM: res: 1, result type: %d\n", trace->GetTraceType())); return TRUE; } } else { // We get here if we are stopped at the beginning of a shuffle thunk. // The next address we are going to is _methodPtrAux. Stub* pStub = Stub::RecoverStub(pc); // We use the patch offset field to indicate whether the stub has a hidden return buffer argument. // This field is set in SetupShuffleThunk(). if (pStub->GetPatchOffset() != 0) { // This stub has a hidden return buffer argument. orDelegate = (DELEGATEREF)ObjectToOBJECTREF(StubManagerHelpers::GetSecondArg(pContext)); } else { orDelegate = (DELEGATEREF)ObjectToOBJECTREF(StubManagerHelpers::GetThisPtr(pContext)); } } destAddr = orDelegate->GetMethodPtrAux(); #elif defined(TARGET_ARM) (*pRetAddr) = (BYTE *)(size_t)(pContext->Lr); pThis = (BYTE*)(size_t)(pContext->R0); // Could be in the singlecast invoke stub (in which case the next destination is in _methodPtr) or a // shuffle thunk (destination in _methodPtrAux). 
int offsetOfNextDest; if (pc == GetEEFuncEntryPoint(SinglecastDelegateInvokeStub)) offsetOfNextDest = DelegateObject::GetOffsetOfMethodPtr(); else offsetOfNextDest = DelegateObject::GetOffsetOfMethodPtrAux(); destAddr = *(PCODE*)(pThis + offsetOfNextDest); #elif defined(TARGET_ARM64) (*pRetAddr) = (BYTE *)(size_t)(pContext->Lr); pThis = (BYTE*)(size_t)(pContext->X0); // Could be in the singlecast invoke stub (in which case the next destination is in _methodPtr) or a // shuffle thunk (destination in _methodPtrAux). int offsetOfNextDest; if (pc == GetEEFuncEntryPoint(SinglecastDelegateInvokeStub)) offsetOfNextDest = DelegateObject::GetOffsetOfMethodPtr(); else offsetOfNextDest = DelegateObject::GetOffsetOfMethodPtrAux(); destAddr = *(PCODE*)(pThis + offsetOfNextDest); #else PORTABILITY_ASSERT("DelegateInvokeStubManager::TraceManager"); destAddr = NULL; #endif LOG((LF_CORDB,LL_INFO10000, "DISM::TM: ppbDest: 0x%p\n", destAddr)); BOOL res = StubManager::TraceStub(destAddr, trace); LOG((LF_CORDB,LL_INFO10000, "DISM::TM: res: %d, result type: %d\n", res, trace->GetTraceType())); return res; } // static BOOL DelegateInvokeStubManager::TraceDelegateObject(BYTE* pbDel, TraceDestination *trace) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; BYTE **ppbDest = NULL; // If we got here, then we're here b/c we're at the start of a delegate stub // need to figure out the kind of delegates we are dealing with BYTE *pbDelInvocationList = *(BYTE **)(pbDel + DelegateObject::GetOffsetOfInvocationList()); LOG((LF_CORDB,LL_INFO10000, "DISM::TMI: invocationList: 0x%p\n", pbDelInvocationList)); if (pbDelInvocationList == NULL) { // null invocationList can be one of the following: // Instance closed, Instance open non-virt, Instance open virtual, Static closed, Static opened, Unmanaged FtnPtr // Instance open virtual is complex and we need to figure out what to do (TODO). 
// For the others the logic is the following: // if _methodPtrAux is 0 the target is in _methodPtr, otherwise the taret is _methodPtrAux ppbDest = (BYTE **)(pbDel + DelegateObject::GetOffsetOfMethodPtrAux()); if (*ppbDest == NULL) { ppbDest = (BYTE **)(pbDel + DelegateObject::GetOffsetOfMethodPtr()); if (*ppbDest == NULL) { // it's not looking good, bail out LOG((LF_CORDB,LL_INFO10000, "DISM(DelegateStub)::TM: can't trace into it\n")); return FALSE; } } LOG((LF_CORDB,LL_INFO10000, "DISM(DelegateStub)::TM: ppbDest: 0x%p *ppbDest:0x%p\n", ppbDest, *ppbDest)); BOOL res = StubManager::TraceStub((PCODE) (*ppbDest), trace); LOG((LF_CORDB,LL_INFO10000, "DISM(MCDel)::TM: res: %d, result type: %d\n", res, trace->GetTraceType())); return res; } // invocationList is not null, so it can be one of the following: // Multicast, Static closed (special sig), Secure // rule out the static with special sig BYTE *pbCount = *(BYTE **)(pbDel + DelegateObject::GetOffsetOfInvocationCount()); if (!pbCount) { // it's a static closed, the target lives in _methodAuxPtr ppbDest = (BYTE **)(pbDel + DelegateObject::GetOffsetOfMethodPtrAux()); if (*ppbDest == NULL) { // it's not looking good, bail out LOG((LF_CORDB,LL_INFO10000, "DISM(DelegateStub)::TM: can't trace into it\n")); return FALSE; } LOG((LF_CORDB,LL_INFO10000, "DISM(DelegateStub)::TM: ppbDest: 0x%p *ppbDest:0x%p\n", ppbDest, *ppbDest)); BOOL res = StubManager::TraceStub((PCODE) (*ppbDest), trace); LOG((LF_CORDB,LL_INFO10000, "DISM(MCDel)::TM: res: %d, result type: %d\n", res, trace->GetTraceType())); return res; } MethodTable *pType = *(MethodTable**)pbDelInvocationList; if (pType->IsDelegate()) { // this is a secure deelgate. The target is hidden inside this field, so recurse in and pray... return TraceDelegateObject(pbDelInvocationList, trace); } // Otherwise, we're going for the first invoke of the multi case. // In order to go to the correct spot, we have just have to fish out // slot 0 of the invocation list, and figure out where that's going to, // then put a breakpoint there... pbDel = *(BYTE**)(((ArrayBase *)pbDelInvocationList)->GetDataPtr()); return TraceDelegateObject(pbDel, trace); } #endif // DACCESS_COMPILE #if defined(TARGET_X86) && !defined(UNIX_X86_ABI) #if !defined(DACCESS_COMPILE) // static void TailCallStubManager::Init() { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END StubManager::AddStubManager(new TailCallStubManager()); } bool TailCallStubManager::IsTailCallJitHelper(PCODE code) { LIMITED_METHOD_CONTRACT; return code == GetEEFuncEntryPoint(JIT_TailCall); } #endif // !DACCESS_COMPILED BOOL TailCallStubManager::CheckIsStub_Internal(PCODE stubStartAddress) { LIMITED_METHOD_DAC_CONTRACT; bool fIsStub = false; #if !defined(DACCESS_COMPILE) fIsStub = IsTailCallJitHelper(stubStartAddress); #endif // !DACCESS_COMPILE return fIsStub; } #if !defined(DACCESS_COMPILE) EXTERN_C void STDCALL JIT_TailCallLeave(); EXTERN_C void STDCALL JIT_TailCallVSDLeave(); BOOL TailCallStubManager::TraceManager(Thread * pThread, TraceDestination * pTrace, T_CONTEXT * pContext, BYTE ** ppRetAddr) { WRAPPER_NO_CONTRACT; TADDR esp = GetSP(pContext); TADDR ebp = GetFP(pContext); // Check if we are stopped at the beginning of JIT_TailCall(). if (GetIP(pContext) == GetEEFuncEntryPoint(JIT_TailCall)) { // There are two cases in JIT_TailCall(). The first one is a normal tail call. // The second one is a tail call to a virtual method. *ppRetAddr = *(reinterpret_cast<BYTE **>(ebp + sizeof(SIZE_T))); // Check whether this is a VSD tail call. 
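    // (Descriptive note, not in the original source: the x86 JIT_TailCall helper spills a small flags
    // word onto its stack frame; JIT_TailCall_StackOffsetToFlags from asmconstants.h locates it, and
    // the 0x2 bit tested below marks a virtual-stub-dispatch (VSD) tail call.)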
SIZE_T flags = *(reinterpret_cast<SIZE_T *>(esp + JIT_TailCall_StackOffsetToFlags)); if (flags & 0x2) { // This is a VSD tail call. pTrace->InitForManagerPush(GetEEFuncEntryPoint(JIT_TailCallVSDLeave), this); return TRUE; } else { // This is not a VSD tail call. pTrace->InitForManagerPush(GetEEFuncEntryPoint(JIT_TailCallLeave), this); return TRUE; } } else { if (GetIP(pContext) == GetEEFuncEntryPoint(JIT_TailCallLeave)) { // This is the simple case. The tail call goes directly to the target. There won't be an // explicit frame on the stack. We should be right at the return instruction which branches to // the call target. The return address is stored in the second leafmost stack slot. *ppRetAddr = *(reinterpret_cast<BYTE **>(esp + sizeof(SIZE_T))); } else { _ASSERTE(GetIP(pContext) == GetEEFuncEntryPoint(JIT_TailCallVSDLeave)); // This is the VSD case. The tail call goes through a assembly helper function which sets up // and tears down an explicit frame. In this case, the return address is at the same place // as on entry to JIT_TailCall(). *ppRetAddr = *(reinterpret_cast<BYTE **>(ebp + sizeof(SIZE_T))); } // In both cases, the target address is stored in the leafmost stack slot. pTrace->InitForStub((PCODE)*reinterpret_cast<SIZE_T *>(esp)); return TRUE; } } #endif // !DACCESS_COMPILE BOOL TailCallStubManager::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace) { WRAPPER_NO_CONTRACT; LOG((LF_CORDB, LL_EVERYTHING, "TailCallStubManager::DoTraceStub called\n")); BOOL fResult = FALSE; // Make sure we are stopped at the beginning of JIT_TailCall(). _ASSERTE(CheckIsStub_Internal(stubStartAddress)); trace->InitForManagerPush(stubStartAddress, this); fResult = TRUE; LOG_TRACE_DESTINATION(trace, stubStartAddress, "TailCallStubManager::DoTraceStub"); return fResult; } #endif // TARGET_X86 && !UNIX_X86_ABI #ifdef DACCESS_COMPILE void PrecodeStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags) { SUPPORTS_DAC; WRAPPER_NO_CONTRACT; DAC_ENUM_VTHIS(); EMEM_OUT(("MEM: %p PrecodeStubManager\n", dac_cast<TADDR>(this))); GetStubPrecodeRangeList()->EnumMemoryRegions(flags); GetFixupPrecodeRangeList()->EnumMemoryRegions(flags); } void StubLinkStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags) { SUPPORTS_DAC; WRAPPER_NO_CONTRACT; DAC_ENUM_VTHIS(); EMEM_OUT(("MEM: %p StubLinkStubManager\n", dac_cast<TADDR>(this))); GetRangeList()->EnumMemoryRegions(flags); } void ThunkHeapStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags) { SUPPORTS_DAC; WRAPPER_NO_CONTRACT; DAC_ENUM_VTHIS(); EMEM_OUT(("MEM: %p ThunkHeapStubManager\n", dac_cast<TADDR>(this))); GetRangeList()->EnumMemoryRegions(flags); } void JumpStubStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags) { SUPPORTS_DAC; WRAPPER_NO_CONTRACT; DAC_ENUM_VTHIS(); EMEM_OUT(("MEM: %p JumpStubStubManager\n", dac_cast<TADDR>(this))); } void RangeSectionStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags) { SUPPORTS_DAC; WRAPPER_NO_CONTRACT; DAC_ENUM_VTHIS(); EMEM_OUT(("MEM: %p RangeSectionStubManager\n", dac_cast<TADDR>(this))); } void ILStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags) { SUPPORTS_DAC; WRAPPER_NO_CONTRACT; DAC_ENUM_VTHIS(); EMEM_OUT(("MEM: %p ILStubManager\n", dac_cast<TADDR>(this))); } void InteropDispatchStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags) { SUPPORTS_DAC; WRAPPER_NO_CONTRACT; DAC_ENUM_VTHIS(); EMEM_OUT(("MEM: %p InteropDispatchStubManager\n", dac_cast<TADDR>(this))); } void DelegateInvokeStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags) { 
    SUPPORTS_DAC;
    WRAPPER_NO_CONTRACT;
    DAC_ENUM_VTHIS();
    EMEM_OUT(("MEM: %p DelegateInvokeStubManager\n", dac_cast<TADDR>(this)));
    GetRangeList()->EnumMemoryRegions(flags);
}

void VirtualCallStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags)
{
    SUPPORTS_DAC;
    WRAPPER_NO_CONTRACT;
    DAC_ENUM_VTHIS();
    EMEM_OUT(("MEM: %p VirtualCallStubManager\n", dac_cast<TADDR>(this)));
    GetLookupRangeList()->EnumMemoryRegions(flags);
    GetResolveRangeList()->EnumMemoryRegions(flags);
    GetDispatchRangeList()->EnumMemoryRegions(flags);
    GetCacheEntryRangeList()->EnumMemoryRegions(flags);
}

#if defined(TARGET_X86) && !defined(UNIX_X86_ABI)
void TailCallStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags)
{
    SUPPORTS_DAC;
    WRAPPER_NO_CONTRACT;
    DAC_ENUM_VTHIS();
    EMEM_OUT(("MEM: %p TailCallStubManager\n", dac_cast<TADDR>(this)));
}
#endif

#endif // #ifdef DACCESS_COMPILE
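// ---------------------------------------------------------------------------------------------
// Illustrative sketch (not part of the original sources): one way a stepper-like caller could
// use the stub-manager APIs above to resolve where an arbitrary code address eventually leads.
// Only StubManager::TraceStub/FollowTrace and the TraceDestination accessors are real APIs from
// this file; the helper itself is hypothetical, so it is kept compiled out.
#if 0 // documentation-only sketch
static PCODE SketchResolveFinalAddress(PCODE ip)
{
    TraceDestination trace;

    // Ask the registered stub managers (and the code manager) what this address is.
    if (!StubManager::TraceStub(ip, &trace))
        return NULL;                           // TRACE_OTHER: nothing claims the address

    // Chase TRACE_STUB links until the destination is no longer another stub.
    if (!StubManager::FollowTrace(&trace))
        return NULL;

    // An unjitted method has no native address yet; a debugger would bind a patch against
    // trace.GetMethodDesc() and wait for the jit-complete notification instead.
    if (trace.GetTraceType() == TRACE_UNJITTED_METHOD)
        return NULL;

    return trace.GetAddress();
}
#endif
// ---------------------------------------------------------------------------------------------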
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "common.h" #include "stubmgr.h" #include "virtualcallstub.h" #include "dllimportcallback.h" #include "stubhelpers.h" #include "asmconstants.h" #ifdef FEATURE_COMINTEROP #include "olecontexthelpers.h" #endif #ifdef LOGGING const char *GetTType( TraceType tt) { LIMITED_METHOD_CONTRACT; switch( tt ) { case TRACE_ENTRY_STUB: return "TRACE_ENTRY_STUB"; case TRACE_STUB: return "TRACE_STUB"; case TRACE_UNMANAGED: return "TRACE_UNMANAGED"; case TRACE_MANAGED: return "TRACE_MANAGED"; case TRACE_FRAME_PUSH: return "TRACE_FRAME_PUSH"; case TRACE_MGR_PUSH: return "TRACE_MGR_PUSH"; case TRACE_OTHER: return "TRACE_OTHER"; case TRACE_UNJITTED_METHOD: return "TRACE_UNJITTED_METHOD"; } return "TRACE_REALLY_WACKED"; } void LogTraceDestination(const char * szHint, PCODE stubAddr, TraceDestination * pTrace) { LIMITED_METHOD_CONTRACT; if (pTrace->GetTraceType() == TRACE_UNJITTED_METHOD) { MethodDesc * md = pTrace->GetMethodDesc(); LOG((LF_CORDB, LL_INFO10000, "'%s' yields '%s' to method 0x%p for input 0x%p.\n", szHint, GetTType(pTrace->GetTraceType()), md, stubAddr)); } else { LOG((LF_CORDB, LL_INFO10000, "'%s' yields '%s' to address 0x%p for input 0x%p.\n", szHint, GetTType(pTrace->GetTraceType()), pTrace->GetAddress(), stubAddr)); } } #endif #ifdef _DEBUG // Get a string representation of this TraceDestination // Uses the supplied buffer to store the memory (or may return a string literal). const WCHAR * TraceDestination::DbgToString(SString & buffer) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; const WCHAR * pValue = W("unknown"); #ifndef DACCESS_COMPILE if (!StubManager::IsStubLoggingEnabled()) { return W("<unavailable while native-debugging>"); } // Now that we know we're not interop-debugging, we can safely call new. SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE; FAULT_NOT_FATAL(); EX_TRY { switch(this->type) { case TRACE_ENTRY_STUB: buffer.Printf("TRACE_ENTRY_STUB(addr=0x%p)", GetAddress()); pValue = buffer.GetUnicode(); break; case TRACE_STUB: buffer.Printf("TRACE_STUB(addr=0x%p)", GetAddress()); pValue = buffer.GetUnicode(); break; case TRACE_UNMANAGED: buffer.Printf("TRACE_UNMANAGED(addr=0x%p)", GetAddress()); pValue = buffer.GetUnicode(); break; case TRACE_MANAGED: buffer.Printf("TRACE_MANAGED(addr=0x%p)", GetAddress()); pValue = buffer.GetUnicode(); break; case TRACE_UNJITTED_METHOD: { MethodDesc * md = this->GetMethodDesc(); buffer.Printf("TRACE_UNJITTED_METHOD(md=0x%p, %s::%s)", md, md->m_pszDebugClassName, md->m_pszDebugMethodName); pValue = buffer.GetUnicode(); } break; case TRACE_FRAME_PUSH: buffer.Printf("TRACE_FRAME_PUSH(addr=0x%p)", GetAddress()); pValue = buffer.GetUnicode(); break; case TRACE_MGR_PUSH: buffer.Printf("TRACE_MGR_PUSH(addr=0x%p, sm=%s)", GetAddress(), this->GetStubManager()->DbgGetName()); pValue = buffer.GetUnicode(); break; case TRACE_OTHER: pValue = W("TRACE_OTHER"); break; } } EX_CATCH { pValue = W("(OOM while printing TD)"); } EX_END_CATCH(SwallowAllExceptions); #endif return pValue; } #endif void TraceDestination::InitForUnjittedMethod(MethodDesc * pDesc) { CONTRACTL { GC_NOTRIGGER; NOTHROW; MODE_ANY; PRECONDITION(CheckPointer(pDesc)); } CONTRACTL_END; _ASSERTE(pDesc->SanityCheck()); { // If this is a wrapper stub, then find the real method that it will go to and patch that. // This is more than just a convenience - converted wrapper MD to real MD is required for correct behavior. 
// Wrapper MDs look like unjitted MethodDescs. So when the debugger patches one, // it won't actually bind + apply the patch (it'll wait for the jit-complete instead). // But if the wrapper MD is for prejitted code, then we'll never get the Jit-complete. // Thus it'll miss the patch completely. if (pDesc->IsWrapperStub()) { MethodDesc * pNewDesc = NULL; FAULT_NOT_FATAL(); #ifndef DACCESS_COMPILE EX_TRY { pNewDesc = pDesc->GetExistingWrappedMethodDesc(); } EX_CATCH { // In case of an error, we'll just stick w/ the original method desc. } EX_END_CATCH(SwallowAllExceptions) #else // @todo - DAC needs this too, but the method is currently not DACized. // However, we don't throw here b/c the error may not be fatal. // DacNotImpl(); #endif if (pNewDesc != NULL) { pDesc = pNewDesc; LOG((LF_CORDB, LL_INFO10000, "TD::UnjittedMethod: wrapper md: %p --> %p\n", pDesc, pNewDesc)); } } } this->type = TRACE_UNJITTED_METHOD; this->pDesc = pDesc; this->stubManager = NULL; } // Initialize statics. #ifdef _DEBUG SString * StubManager::s_pDbgStubManagerLog = NULL; CrstStatic StubManager::s_DbgLogCrst; #endif SPTR_IMPL(StubManager, StubManager, g_pFirstManager); CrstStatic StubManager::s_StubManagerListCrst; //----------------------------------------------------------- // For perf reasons, the stub managers are now kept in a two // tier system: all stub managers but the VirtualStubManagers // are in the first tier. A VirtualStubManagerManager takes // care of all VirtualStubManagers, and is iterated last of // all. It does a smarter job of looking up the owning // manager for virtual stubs, checking the current and shared // appdomains before checking the remaining managers. // // Thus, this iterator will run the regular list until it // hits the end, then it will check the VSMM, then it will // end. //----------------------------------------------------------- class StubManagerIterator { public: StubManagerIterator(); ~StubManagerIterator(); void Reset(); BOOL Next(); PTR_StubManager Current(); protected: enum SMI_State { SMI_START, SMI_NORMAL, SMI_VIRTUALCALLSTUBMANAGER, SMI_END }; SMI_State m_state; PTR_StubManager m_pCurMgr; SimpleReadLockHolder m_lh; }; //----------------------------------------------------------- // Ctor //----------------------------------------------------------- StubManagerIterator::StubManagerIterator() { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; Reset(); } void StubManagerIterator::Reset() { LIMITED_METHOD_DAC_CONTRACT; m_pCurMgr = NULL; m_state = SMI_START; } //----------------------------------------------------------- // Ctor //----------------------------------------------------------- StubManagerIterator::~StubManagerIterator() { LIMITED_METHOD_DAC_CONTRACT; } //----------------------------------------------------------- // Move to the next element. 
Iterators are created at // start-1, so must call Next before using Current //----------------------------------------------------------- BOOL StubManagerIterator::Next() { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; #ifndef DACCESS_COMPILE CAN_TAKE_LOCK; // because of m_lh.Assign() #else CANNOT_TAKE_LOCK; #endif } CONTRACTL_END; SUPPORTS_DAC; do { if (m_state == SMI_START) { m_state = SMI_NORMAL; m_pCurMgr = StubManager::g_pFirstManager; } else if (m_state == SMI_NORMAL) { if (m_pCurMgr != NULL) { m_pCurMgr = m_pCurMgr->m_pNextManager; } else { // If we've reached the end of the regular list of stub managers, then we // set the VirtualCallStubManagerManager is the current item (effectively // forcing it to always be the last manager checked). m_state = SMI_VIRTUALCALLSTUBMANAGER; VirtualCallStubManagerManager *pVCSMMgr = VirtualCallStubManagerManager::GlobalManager(); m_pCurMgr = PTR_StubManager(pVCSMMgr); #ifndef DACCESS_COMPILE m_lh.Assign(&pVCSMMgr->m_RWLock); #endif } } else if (m_state == SMI_VIRTUALCALLSTUBMANAGER) { m_state = SMI_END; m_pCurMgr = NULL; #ifndef DACCESS_COMPILE m_lh.Clear(); #endif } } while (m_state != SMI_END && m_pCurMgr == NULL); CONSISTENCY_CHECK(m_state == SMI_END || m_pCurMgr != NULL); return (m_state != SMI_END); } //----------------------------------------------------------- // Get the current contents of the iterator //----------------------------------------------------------- PTR_StubManager StubManagerIterator::Current() { LIMITED_METHOD_DAC_CONTRACT; CONSISTENCY_CHECK(m_state != SMI_START); CONSISTENCY_CHECK(m_state != SMI_END); CONSISTENCY_CHECK(CheckPointer(m_pCurMgr)); return m_pCurMgr; } #ifndef DACCESS_COMPILE //----------------------------------------------------------- //----------------------------------------------------------- StubManager::StubManager() : m_pNextManager(NULL) { LIMITED_METHOD_CONTRACT; } //----------------------------------------------------------- //----------------------------------------------------------- StubManager::~StubManager() { CONTRACTL { NOTHROW; GC_NOTRIGGER; CAN_TAKE_LOCK; // StubManager::UnlinkStubManager uses a crst PRECONDITION(CheckPointer(this)); } CONTRACTL_END; UnlinkStubManager(this); } #endif // #ifndef DACCESS_COMPILE #ifdef _DEBUG_IMPL //----------------------------------------------------------- // Verify that the stub is owned by the given stub manager // and no other stub manager. If a stub is claimed by multiple managers, // then the wrong manager may claim ownership and improperly trace the stub. //----------------------------------------------------------- BOOL StubManager::IsSingleOwner(PCODE stubAddress, StubManager * pOwner) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; STATIC_CONTRACT_CAN_TAKE_LOCK; // courtesy StubManagerIterator // ensure this stubmanager owns it. _ASSERTE(pOwner != NULL); // ensure nobody else does. bool ownerFound = false; int count = 0; StubManagerIterator it; while (it.Next()) { // Callers would have iterated till pOwner. if (!ownerFound && it.Current() != pOwner) continue; if (it.Current() == pOwner) ownerFound = true; if (it.Current()->CheckIsStub_Worker(stubAddress)) { // If you hit this assert, you can tell what 2 stub managers are conflicting by inspecting their vtable. 
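        // (Descriptive note, not in the original source: this debug-only check re-walks the whole
        // manager list, counts how many managers claim stubAddress, and asserts that pOwner is the
        // only one - overlapping claims would let the wrong manager trace the stub.)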
CONSISTENCY_CHECK_MSGF((it.Current() == pOwner), ("Stub at 0x%p is owner by multiple managers (0x%p, 0x%p)", (void*) stubAddress, pOwner, it.Current())); count++; } else { _ASSERTE(it.Current() != pOwner); } } _ASSERTE(ownerFound); // We expect pOwner to be the only one to own this stub. return (count == 1); } #endif //----------------------------------------------------------- //----------------------------------------------------------- BOOL StubManager::CheckIsStub_Worker(PCODE stubStartAddress) { CONTRACTL { NOTHROW; CAN_TAKE_LOCK; // CheckIsStub_Internal can enter SimpleRWLock GC_NOTRIGGER; } CONTRACTL_END; SUPPORTS_DAC; // @todo - consider having a single check for null right up front. // Though this may cover bugs where stub-managers don't handle bad addresses. // And someone could just as easily pass (0x01) as NULL. if (stubStartAddress == NULL) { return FALSE; } struct Param { BOOL fIsStub; StubManager *pThis; TADDR stubStartAddress; } param; param.fIsStub = FALSE; param.pThis = this; param.stubStartAddress = stubStartAddress; // This may be called from DAC, and DAC + non-DAC have very different // exception handling. #ifdef DACCESS_COMPILE PAL_TRY(Param *, pParam, &param) #else Param *pParam = &param; EX_TRY #endif { SUPPORTS_DAC; #ifndef DACCESS_COMPILE // Use CheckIsStub_Internal may AV. That's ok. AVInRuntimeImplOkayHolder AVOkay; #endif // Make a Polymorphic call to derived stub manager. // Try to see if this address is for a stub. If the address is // completely bogus, then this might fault, so we protect it // with SEH. pParam->fIsStub = pParam->pThis->CheckIsStub_Internal(pParam->stubStartAddress); } #ifdef DACCESS_COMPILE PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER) #else EX_CATCH #endif { LOG((LF_CORDB, LL_INFO10000, "D::GASTSI: exception indicated addr is bad.\n")); param.fIsStub = FALSE; } #ifdef DACCESS_COMPILE PAL_ENDTRY #else EX_END_CATCH(SwallowAllExceptions); #endif return param.fIsStub; } //----------------------------------------------------------- // stubAddress may be an invalid address. //----------------------------------------------------------- PTR_StubManager StubManager::FindStubManager(PCODE stubAddress) { CONTRACTL { NOTHROW; GC_NOTRIGGER; CAN_TAKE_LOCK; // courtesy StubManagerIterator } CONTRACTL_END; SUPPORTS_DAC; StubManagerIterator it; while (it.Next()) { if (it.Current()->CheckIsStub_Worker(stubAddress)) { _ASSERTE_IMPL(IsSingleOwner(stubAddress, it.Current())); return it.Current(); } } return NULL; } //----------------------------------------------------------- // Given an address, figure out a TraceDestination describing where // the instructions at that address will eventually transfer execution to. 
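// (Descriptive note, not in the original source: the lookup below asks each registered manager in
// StubManagerIterator order - the VirtualCallStubManagerManager is always consulted last - then
// falls back to ExecutionManager::IsManagedCode; anything still unclaimed is reported as
// TRACE_OTHER and the function returns FALSE.)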
//----------------------------------------------------------- BOOL StubManager::TraceStub(PCODE stubStartAddress, TraceDestination *trace) { WRAPPER_NO_CONTRACT; StubManagerIterator it; while (it.Next()) { StubManager * pCurrent = it.Current(); if (pCurrent->CheckIsStub_Worker(stubStartAddress)) { LOG((LF_CORDB, LL_INFO10000, "StubManager::TraceStub: addr 0x%p claimed by mgr " "0x%p.\n", stubStartAddress, pCurrent)); _ASSERTE_IMPL(IsSingleOwner(stubStartAddress, pCurrent)); BOOL fValid = pCurrent->DoTraceStub(stubStartAddress, trace); #ifdef _DEBUG if (IsStubLoggingEnabled()) { DbgWriteLog("Doing TraceStub for Address 0x%p, claimed by '%s' (0x%p)\n", stubStartAddress, pCurrent->DbgGetName(), pCurrent); if (fValid) { SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE; FAULT_NOT_FATAL(); SString buffer; DbgWriteLog(" td=%S\n", trace->DbgToString(buffer)); } else { DbgWriteLog(" stubmanager returned false. Does not expect to call managed code\n"); } } // logging #endif return fValid; } } if (ExecutionManager::IsManagedCode(stubStartAddress)) { trace->InitForManaged(stubStartAddress); #ifdef _DEBUG DbgWriteLog("Doing TraceStub for Address 0x%p is jitted code claimed by codemanager\n", stubStartAddress); #endif LOG((LF_CORDB, LL_INFO10000, "StubManager::TraceStub: addr 0x%p is managed code\n", stubStartAddress)); return TRUE; } LOG((LF_CORDB, LL_INFO10000, "StubManager::TraceStub: addr 0x%p unknown. TRACE_OTHER...\n", stubStartAddress)); #ifdef _DEBUG DbgWriteLog("Doing TraceStub for Address 0x%p is unknown!!!\n", stubStartAddress); #endif trace->InitForOther(stubStartAddress); return FALSE; } //----------------------------------------------------------- //----------------------------------------------------------- BOOL StubManager::FollowTrace(TraceDestination *trace) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; while (trace->GetTraceType() == TRACE_STUB) { LOG((LF_CORDB, LL_INFO10000, "StubManager::FollowTrace: TRACE_STUB for 0x%p\n", trace->GetAddress())); if (!TraceStub(trace->GetAddress(), trace)) { // // No stub manager claimed it - it must be an EE helper or something. // trace->InitForOther(trace->GetAddress()); } } LOG_TRACE_DESTINATION(trace, NULL, "StubManager::FollowTrace"); return trace->GetTraceType() != TRACE_OTHER; } #ifndef DACCESS_COMPILE //----------------------------------------------------------- //----------------------------------------------------------- void StubManager::AddStubManager(StubManager *mgr) { WRAPPER_NO_CONTRACT; CONSISTENCY_CHECK(CheckPointer(g_pFirstManager, NULL_OK)); CONSISTENCY_CHECK(CheckPointer(mgr)); GCX_COOP_NO_THREAD_BROKEN(); CrstHolder ch(&s_StubManagerListCrst); if (g_pFirstManager == NULL) { g_pFirstManager = mgr; } else { mgr->m_pNextManager = g_pFirstManager; g_pFirstManager = mgr; } LOG((LF_CORDB, LL_EVERYTHING, "StubManager::AddStubManager - 0x%p (vptr %p)\n", mgr, (*(PVOID*)mgr))); } //----------------------------------------------------------- // NOTE: The runtime MUST be suspended to use this in a // truly safe manner. 
//----------------------------------------------------------- void StubManager::UnlinkStubManager(StubManager *mgr) { STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_CAN_TAKE_LOCK; CONSISTENCY_CHECK(CheckPointer(g_pFirstManager, NULL_OK)); CONSISTENCY_CHECK(CheckPointer(mgr)); CrstHolder ch(&s_StubManagerListCrst); StubManager **m = &g_pFirstManager; while (*m != NULL) { if (*m == mgr) { *m = (*m)->m_pNextManager; return; } m = &(*m)->m_pNextManager; } } #endif // #ifndef DACCESS_COMPILE #ifdef DACCESS_COMPILE //----------------------------------------------------------- //----------------------------------------------------------- void StubManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags) { SUPPORTS_DAC; // Report the global list head. DacEnumMemoryRegion(DacGlobalBase() + g_dacGlobals.StubManager__g_pFirstManager, sizeof(TADDR)); // // Report the list contents. // StubManagerIterator it; while (it.Next()) { it.Current()->DoEnumMemoryRegions(flags); } } //----------------------------------------------------------- //----------------------------------------------------------- void StubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags) { SUPPORTS_DAC; DAC_ENUM_VTHIS(); EMEM_OUT(("MEM: %p StubManager base\n", dac_cast<TADDR>(this))); } #endif // #ifdef DACCESS_COMPILE //----------------------------------------------------------- // Initialize the global stub manager service. //----------------------------------------------------------- void StubManager::InitializeStubManagers() { #if !defined(DACCESS_COMPILE) #if defined(_DEBUG) s_DbgLogCrst.Init(CrstDebuggerHeapLock, (CrstFlags)(CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD | CRST_TAKEN_DURING_SHUTDOWN)); #endif s_StubManagerListCrst.Init(CrstDebuggerHeapLock, (CrstFlags)(CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD | CRST_TAKEN_DURING_SHUTDOWN)); #endif // !DACCESS_COMPILE } //----------------------------------------------------------- // Terminate the global stub manager service. //----------------------------------------------------------- void StubManager::TerminateStubManagers() { #if !defined(DACCESS_COMPILE) #if defined(_DEBUG) DbgFinishLog(); s_DbgLogCrst.Destroy(); #endif s_StubManagerListCrst.Destroy(); #endif // !DACCESS_COMPILE } #ifdef _DEBUG //----------------------------------------------------------- // Should stub-manager logging be enabled? //----------------------------------------------------------- bool StubManager::IsStubLoggingEnabled() { // Our current logging impl uses SString, which uses new(), which can't be called // on the helper thread. (B/c it may deadlock. See SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE) // We avoid this by just not logging when native-debugging. if (IsDebuggerPresent()) { return false; } return true; } //----------------------------------------------------------- // Call to reset the log. This is used at the start of a new step-operation. // pThread is the managed thread doing the stepping. // It should either be the current thread or the helper thread. //----------------------------------------------------------- void StubManager::DbgBeginLog(TADDR addrCallInstruction, TADDR addrCallTarget) { #ifndef DACCESS_COMPILE CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; // We can't call new() if another thread holds the heap lock and is then suspended by // an interop-debugging. Since this is debug-only logging code, we'll just skip // it under those cases. 
if (!IsStubLoggingEnabled()) { return; } // Now that we know we're not interop-debugging, we can safely call new. SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE; FAULT_NOT_FATAL(); { CrstHolder ch(&s_DbgLogCrst); EX_TRY { if (s_pDbgStubManagerLog == NULL) { s_pDbgStubManagerLog = new SString(); } s_pDbgStubManagerLog->Clear(); } EX_CATCH { DbgFinishLog(); } EX_END_CATCH(SwallowAllExceptions); } DbgWriteLog("Beginning Step-in. IP after Call instruction is at 0x%p, call target is at 0x%p\n", addrCallInstruction, addrCallTarget); #endif } //----------------------------------------------------------- // Finish logging for this thread. // pThread is the managed thread doing the stepping. // It should either be the current thread or the helper thread. //----------------------------------------------------------- void StubManager::DbgFinishLog() { #ifndef DACCESS_COMPILE CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; CrstHolder ch(&s_DbgLogCrst); // Since this is just a tool for debugging, we don't care if we call new. SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE; FAULT_NOT_FATAL(); delete s_pDbgStubManagerLog; s_pDbgStubManagerLog = NULL; #endif } //----------------------------------------------------------- // Write an arbitrary string to the log. //----------------------------------------------------------- void StubManager::DbgWriteLog(const CHAR *format, ...) { #ifndef DACCESS_COMPILE CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; if (!IsStubLoggingEnabled()) { return; } // Since this is just a tool for debugging, we don't care if we call new. SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE; FAULT_NOT_FATAL(); CrstHolder ch(&s_DbgLogCrst); if (s_pDbgStubManagerLog == NULL) { return; } // Suppress asserts about lossy encoding conversion in SString::Printf CHECK chk; BOOL fEntered = chk.EnterAssert(); EX_TRY { va_list args; va_start(args, format); s_pDbgStubManagerLog->AppendVPrintf(format, args); va_end(args); } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions); if (fEntered) chk.LeaveAssert(); #endif } //----------------------------------------------------------- // Get the log as a string. //----------------------------------------------------------- void StubManager::DbgGetLog(SString * pStringOut) { #ifndef DACCESS_COMPILE CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; PRECONDITION(CheckPointer(pStringOut)); } CONTRACTL_END; if (!IsStubLoggingEnabled()) { return; } // Since this is just a tool for debugging, we don't care if we call new. SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE; FAULT_NOT_FATAL(); CrstHolder ch(&s_DbgLogCrst); if (s_pDbgStubManagerLog == NULL) { return; } EX_TRY { pStringOut->Set(*s_pDbgStubManagerLog); } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions); #endif } #endif // _DEBUG extern "C" void STDCALL ThePreStubPatchLabel(void); //----------------------------------------------------------- //----------------------------------------------------------- BOOL ThePreStubManager::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; PRECONDITION(stubStartAddress != NULL); PRECONDITION(CheckPointer(trace)); } CONTRACTL_END; // // We cannot tell where the stub will end up // until after the prestub worker has been run. 
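    // (Descriptive note, not in the original source: ThePreStubPatchLabel, declared just above, is
    // presumably a label inside the assembly prestub patch routine where the debugger can regain
    // control once the prestub worker has produced or located the method's real code.)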
// trace->InitForFramePush(GetEEFuncEntryPoint(ThePreStubPatchLabel)); return TRUE; } //----------------------------------------------------------- BOOL ThePreStubManager::CheckIsStub_Internal(PCODE stubStartAddress) { LIMITED_METHOD_DAC_CONTRACT; return stubStartAddress == GetPreStubEntryPoint(); } // ------------------------------------------------------- // Stub manager functions & globals // ------------------------------------------------------- SPTR_IMPL(PrecodeStubManager, PrecodeStubManager, g_pManager); #ifndef DACCESS_COMPILE /* static */ void PrecodeStubManager::Init() { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END g_pManager = new PrecodeStubManager(); StubManager::AddStubManager(g_pManager); } #endif // #ifndef DACCESS_COMPILE /* static */ BOOL PrecodeStubManager::CheckIsStub_Internal(PCODE stubStartAddress) { CONTRACTL { THROWS; // address may be bad, so we may AV. GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; return GetStubPrecodeRangeList()->IsInRange(stubStartAddress) || GetFixupPrecodeRangeList()->IsInRange(stubStartAddress); } BOOL PrecodeStubManager::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace) { CONTRACTL { INSTANCE_CHECK; NOTHROW; GC_NOTRIGGER; MODE_ANY; FORBID_FAULT; } CONTRACTL_END LOG((LF_CORDB, LL_EVERYTHING, "PrecodeStubManager::DoTraceStub called\n")); MethodDesc* pMD = NULL; #ifdef HAS_COMPACT_ENTRYPOINTS if (MethodDescChunk::IsCompactEntryPointAtAddress(stubStartAddress)) { pMD = MethodDescChunk::GetMethodDescFromCompactEntryPoint(stubStartAddress); } else #endif // HAS_COMPACT_ENTRYPOINTS { // When the target slot points to the fixup part of the fixup precode, we need to compensate // for that to get the actual stub address Precode* pPrecode = Precode::GetPrecodeFromEntryPoint(stubStartAddress - FixupPrecode::FixupCodeOffset, TRUE /* speculative */); if ((pPrecode == NULL) || (pPrecode->GetType() != PRECODE_FIXUP)) { pPrecode = Precode::GetPrecodeFromEntryPoint(stubStartAddress); } PREFIX_ASSUME(pPrecode != NULL); switch (pPrecode->GetType()) { case PRECODE_STUB: break; #ifdef HAS_NDIRECT_IMPORT_PRECODE case PRECODE_NDIRECT_IMPORT: #ifndef DACCESS_COMPILE trace->InitForUnmanaged(GetEEFuncEntryPoint(NDirectImportThunk)); #else trace->InitForOther(NULL); #endif LOG_TRACE_DESTINATION(trace, stubStartAddress, "PrecodeStubManager::DoTraceStub - NDirect import"); return TRUE; #endif // HAS_NDIRECT_IMPORT_PRECODE #ifdef HAS_FIXUP_PRECODE case PRECODE_FIXUP: break; #endif // HAS_FIXUP_PRECODE #ifdef HAS_THISPTR_RETBUF_PRECODE case PRECODE_THISPTR_RETBUF: break; #endif // HAS_THISPTR_RETBUF_PRECODE default: _ASSERTE_IMPL(!"DoTraceStub: Unexpected precode type"); break; } PCODE target = pPrecode->GetTarget(); // check if the method has been jitted if (!pPrecode->IsPointingToPrestub(target)) { trace->InitForStub(target); LOG_TRACE_DESTINATION(trace, stubStartAddress, "PrecodeStubManager::DoTraceStub - code"); return TRUE; } pMD = pPrecode->GetMethodDesc(); } PREFIX_ASSUME(pMD != NULL); // If the method is not IL, then we patch the prestub because no one will ever change the call here at the // MethodDesc. If, however, this is an IL method, then we are at risk to have another thread backpatch the call // here, so we'd miss if we patched the prestub. Therefore, we go right to the IL method and patch IL offset 0 // by using TRACE_UNJITTED_METHOD. 
if (!pMD->IsIL() && !pMD->IsILStub()) { trace->InitForStub(GetPreStubEntryPoint()); } else { trace->InitForUnjittedMethod(pMD); } LOG_TRACE_DESTINATION(trace, stubStartAddress, "PrecodeStubManager::DoTraceStub - prestub"); return TRUE; } #ifndef DACCESS_COMPILE BOOL PrecodeStubManager::TraceManager(Thread *thread, TraceDestination *trace, T_CONTEXT *pContext, BYTE **pRetAddr) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; PRECONDITION(CheckPointer(thread, NULL_OK)); PRECONDITION(CheckPointer(trace)); PRECONDITION(CheckPointer(pContext)); PRECONDITION(CheckPointer(pRetAddr)); } CONTRACTL_END; _ASSERTE(!"Unexpected call to PrecodeStubManager::TraceManager"); return FALSE; } #endif // ------------------------------------------------------- // StubLinkStubManager // ------------------------------------------------------- SPTR_IMPL(StubLinkStubManager, StubLinkStubManager, g_pManager); #ifndef DACCESS_COMPILE /* static */ void StubLinkStubManager::Init() { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END g_pManager = new StubLinkStubManager(); StubManager::AddStubManager(g_pManager); } #endif // #ifndef DACCESS_COMPILE BOOL StubLinkStubManager::CheckIsStub_Internal(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; return GetRangeList()->IsInRange(stubStartAddress); } BOOL StubLinkStubManager::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace) { CONTRACTL { INSTANCE_CHECK; NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END LOG((LF_CORDB, LL_INFO10000, "StubLinkStubManager::DoTraceStub: stubStartAddress=0x%p\n", stubStartAddress)); Stub *stub = Stub::RecoverStub(stubStartAddress); LOG((LF_CORDB, LL_INFO10000, "StubLinkStubManager::DoTraceStub: stub=0x%p\n", stub)); // // If this is an intercept stub, we may be able to step // into the intercepted stub. // // <TODO>!!! Note that this case should not be necessary, it's just // here until I get all of the patch offsets & frame patch // methods in place.</TODO> // TADDR pRealAddr = 0; if (stub->IsMulticastDelegate()) { // If it's a MC delegate, then we want to set a BP & do a context-ful // manager push, so that we can figure out if this call will be to a // single multicast delegate or a multi multicast delegate trace->InitForManagerPush(stubStartAddress, this); LOG_TRACE_DESTINATION(trace, stubStartAddress, "StubLinkStubManager(MCDel)::DoTraceStub"); return TRUE; } else if (stub->IsInstantiatingStub()) { trace->InitForManagerPush(stubStartAddress, this); LOG_TRACE_DESTINATION(trace, stubStartAddress, "StubLinkStubManager(InstantiatingMethod)::DoTraceStub"); return TRUE; } else if (stub->GetPatchOffset() != 0) { // The patch offset is currently only non-zero in x86 non-IL delegate scenarios. 
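        // (Descriptive note, not in the original source: GetPatchAddress() yields the stub's entry
        // point plus its recorded patch offset, i.e. the spot the stub linker marked as patchable,
        // so the debugger pushes a frame-based trace and plants its breakpoint there.)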
trace->InitForFramePush((PCODE)stub->GetPatchAddress()); LOG_TRACE_DESTINATION(trace, stubStartAddress, "StubLinkStubManager::DoTraceStub"); return TRUE; } LOG((LF_CORDB, LL_INFO10000, "StubLinkStubManager::DoTraceStub: patch offset is 0!\n")); return FALSE; } #ifndef DACCESS_COMPILE static PCODE GetStubTarget(PTR_MethodDesc pTargetMD) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; PRECONDITION(pTargetMD != NULL); } CONTRACTL_END; NativeCodeVersion targetCode; #ifdef FEATURE_CODE_VERSIONING CodeVersionManager::LockHolder codeVersioningLockHolder; ILCodeVersion ilcode = pTargetMD->GetCodeVersionManager()->GetActiveILCodeVersion(pTargetMD); targetCode = ilcode.GetActiveNativeCodeVersion(pTargetMD); #else targetCode = NativeCodeVersion(pTargetMD); #endif if (targetCode.IsNull() || targetCode.GetNativeCode() == NULL) return NULL; return targetCode.GetNativeCode(); } BOOL StubLinkStubManager::TraceManager(Thread *thread, TraceDestination *trace, T_CONTEXT *pContext, BYTE **pRetAddr) { CONTRACTL { INSTANCE_CHECK; THROWS; GC_TRIGGERS; MODE_ANY; INJECT_FAULT(return FALSE;); } CONTRACTL_END LPVOID pc = (LPVOID)GetIP(pContext); *pRetAddr = (BYTE *)StubManagerHelpers::GetReturnAddress(pContext); LOG((LF_CORDB,LL_INFO10000, "SLSM:TM 0x%p, retAddr is 0x%p\n", pc, (*pRetAddr))); Stub *stub = Stub::RecoverStub((PCODE)pc); if (stub->IsInstantiatingStub()) { LOG((LF_CORDB,LL_INFO10000, "SLSM:TM Instantiating method stub\n")); PTR_MethodDesc pMD = stub->GetInstantiatedMethodDesc(); _ASSERTE(pMD != NULL); PCODE target = GetStubTarget(pMD); if (target == NULL) { LOG((LF_CORDB,LL_INFO10000, "SLSM:TM Unable to determine stub target, fd 0x%p\n", pMD)); trace->InitForUnjittedMethod(pMD); return TRUE; } trace->InitForManaged(target); return TRUE; } else if (stub->IsMulticastDelegate()) { LOG((LF_CORDB,LL_INFO10000, "SLSM:TM MultiCastDelegate\n")); BYTE *pbDel = (BYTE *)StubManagerHelpers::GetThisPtr(pContext); return DelegateInvokeStubManager::TraceDelegateObject(pbDel, trace); } // Runtime bug if we get here. Did we make a change in StubLinkStubManager::DoTraceStub() that // dispatched new stubs to TraceManager without writing the code to handle them? _ASSERTE(!"SLSM:TM wasn't expected to handle any other stub types"); return FALSE; } #endif // #ifndef DACCESS_COMPILE // ------------------------------------------------------- // Stub manager for thunks. // // Note, the only reason we have this stub manager is so that we can recgonize UMEntryThunks for IsTransitionStub. If it // turns out that having a full-blown stub manager for these things causes problems else where, then we can just attach // a range list to the thunk heap and have IsTransitionStub check that after checking with the main stub manager. // ------------------------------------------------------- SPTR_IMPL(ThunkHeapStubManager, ThunkHeapStubManager, g_pManager); #ifndef DACCESS_COMPILE /* static */ void ThunkHeapStubManager::Init() { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; INJECT_FAULT(COMPlusThrowOM()); } CONTRACTL_END; g_pManager = new ThunkHeapStubManager(); StubManager::AddStubManager(g_pManager); } #endif // !DACCESS_COMPILE BOOL ThunkHeapStubManager::CheckIsStub_Internal(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; // Its a stub if its in our heaps range. return GetRangeList()->IsInRange(stubStartAddress); } BOOL ThunkHeapStubManager::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace) { LIMITED_METHOD_CONTRACT; // We never trace through these stubs when stepping through managed code. 
The only reason we have this stub manager // is so that IsTransitionStub can recgonize UMEntryThunks. return FALSE; } // ------------------------------------------------------- // JumpStub stubs // // Stub manager for jump stubs created by ExecutionManager::jumpStub() // These are currently used only on the 64-bit targets IA64 and AMD64 // // ------------------------------------------------------- SPTR_IMPL(JumpStubStubManager, JumpStubStubManager, g_pManager); #ifndef DACCESS_COMPILE /* static */ void JumpStubStubManager::Init() { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END g_pManager = new JumpStubStubManager(); StubManager::AddStubManager(g_pManager); } #endif // #ifndef DACCESS_COMPILE BOOL JumpStubStubManager::CheckIsStub_Internal(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; // Forwarded to from RangeSectionStubManager return FALSE; } BOOL JumpStubStubManager::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace) { LIMITED_METHOD_CONTRACT; PCODE jumpTarget = decodeBackToBackJump(stubStartAddress); trace->InitForStub(jumpTarget); LOG_TRACE_DESTINATION(trace, stubStartAddress, "JumpStubStubManager::DoTraceStub"); return TRUE; } // // Stub manager for code sections. It forwards the query to the more appropriate // stub manager, or handles the query itself. // SPTR_IMPL(RangeSectionStubManager, RangeSectionStubManager, g_pManager); #ifndef DACCESS_COMPILE /* static */ void RangeSectionStubManager::Init() { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END g_pManager = new RangeSectionStubManager(); StubManager::AddStubManager(g_pManager); } #endif // #ifndef DACCESS_COMPILE BOOL RangeSectionStubManager::CheckIsStub_Internal(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; switch (GetStubKind(stubStartAddress)) { case STUB_CODE_BLOCK_PRECODE: case STUB_CODE_BLOCK_JUMPSTUB: case STUB_CODE_BLOCK_STUBLINK: case STUB_CODE_BLOCK_VIRTUAL_METHOD_THUNK: case STUB_CODE_BLOCK_EXTERNAL_METHOD_THUNK: case STUB_CODE_BLOCK_METHOD_CALL_THUNK: return TRUE; default: break; } return FALSE; } BOOL RangeSectionStubManager::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace) { CONTRACTL { INSTANCE_CHECK; NOTHROW; GC_NOTRIGGER; MODE_ANY; FORBID_FAULT; } CONTRACTL_END switch (GetStubKind(stubStartAddress)) { case STUB_CODE_BLOCK_PRECODE: return PrecodeStubManager::g_pManager->DoTraceStub(stubStartAddress, trace); case STUB_CODE_BLOCK_JUMPSTUB: return JumpStubStubManager::g_pManager->DoTraceStub(stubStartAddress, trace); case STUB_CODE_BLOCK_STUBLINK: return StubLinkStubManager::g_pManager->DoTraceStub(stubStartAddress, trace); case STUB_CODE_BLOCK_METHOD_CALL_THUNK: #ifdef DACCESS_COMPILE DacNotImpl(); #else trace->InitForManagerPush(GetEEFuncEntryPoint(ExternalMethodFixupPatchLabel), this); #endif return TRUE; default: break; } return FALSE; } #ifndef DACCESS_COMPILE BOOL RangeSectionStubManager::TraceManager(Thread *thread, TraceDestination *trace, CONTEXT *pContext, BYTE **pRetAddr) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; _ASSERTE(GetIP(pContext) == GetEEFuncEntryPoint(ExternalMethodFixupPatchLabel)); *pRetAddr = (BYTE *)StubManagerHelpers::GetReturnAddress(pContext); PCODE target = StubManagerHelpers::GetTailCallTarget(pContext); trace->InitForStub(target); return TRUE; } #endif #ifdef DACCESS_COMPILE LPCWSTR RangeSectionStubManager::GetStubManagerName(PCODE addr) { WRAPPER_NO_CONTRACT; switch (GetStubKind(addr)) { case STUB_CODE_BLOCK_PRECODE: return W("MethodDescPrestub"); case STUB_CODE_BLOCK_JUMPSTUB: return 
W("JumpStub"); case STUB_CODE_BLOCK_STUBLINK: return W("StubLinkStub"); case STUB_CODE_BLOCK_VIRTUAL_METHOD_THUNK: return W("VirtualMethodThunk"); case STUB_CODE_BLOCK_EXTERNAL_METHOD_THUNK: return W("ExternalMethodThunk"); case STUB_CODE_BLOCK_METHOD_CALL_THUNK: return W("MethodCallThunk"); default: break; } return W("UnknownRangeSectionStub"); } #endif // DACCESS_COMPILE StubCodeBlockKind RangeSectionStubManager::GetStubKind(PCODE stubStartAddress) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; RangeSection * pRS = ExecutionManager::FindCodeRange(stubStartAddress, ExecutionManager::ScanReaderLock); if (pRS == NULL) return STUB_CODE_BLOCK_UNKNOWN; return pRS->pjit->GetStubCodeBlockKind(pRS, stubStartAddress); } // // This is the stub manager for IL stubs. // #ifndef DACCESS_COMPILE /* static */ void ILStubManager::Init() { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END StubManager::AddStubManager(new ILStubManager()); } #endif // #ifndef DACCESS_COMPILE BOOL ILStubManager::CheckIsStub_Internal(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; MethodDesc *pMD = ExecutionManager::GetCodeMethodDesc(stubStartAddress); return (pMD != NULL) && pMD->IsILStub(); } BOOL ILStubManager::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace) { LIMITED_METHOD_CONTRACT; LOG((LF_CORDB, LL_EVERYTHING, "ILStubManager::DoTraceStub called\n")); #ifndef DACCESS_COMPILE PCODE traceDestination = NULL; #ifdef FEATURE_MULTICASTSTUB_AS_IL MethodDesc* pStubMD = ExecutionManager::GetCodeMethodDesc(stubStartAddress); if (pStubMD != NULL && pStubMD->AsDynamicMethodDesc()->IsMulticastStub()) { traceDestination = GetEEFuncEntryPoint(StubHelpers::MulticastDebuggerTraceHelper); } else #endif // FEATURE_MULTICASTSTUB_AS_IL { // This call is going out to unmanaged code, either through pinvoke or COM interop. 
traceDestination = stubStartAddress; } trace->InitForManagerPush(traceDestination, this); LOG_TRACE_DESTINATION(trace, traceDestination, "ILStubManager::DoTraceStub"); return TRUE; #else // !DACCESS_COMPILE trace->InitForOther(NULL); return FALSE; #endif // !DACCESS_COMPILE } #ifndef DACCESS_COMPILE #ifdef FEATURE_COMINTEROP static PCODE GetCOMTarget(Object *pThis, ComPlusCallInfo *pComPlusCallInfo) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; // calculate the target interface pointer SafeComHolder<IUnknown> pUnk; OBJECTREF oref = ObjectToOBJECTREF(pThis); GCPROTECT_BEGIN(oref); pUnk = ComObject::GetComIPFromRCWThrowing(&oref, pComPlusCallInfo->m_pInterfaceMT); GCPROTECT_END(); LPVOID *lpVtbl = *(LPVOID **)(IUnknown *)pUnk; PCODE target = (PCODE)lpVtbl[pComPlusCallInfo->m_cachedComSlot]; return target; } #endif // FEATURE_COMINTEROP BOOL ILStubManager::TraceManager(Thread *thread, TraceDestination *trace, T_CONTEXT *pContext, BYTE **pRetAddr) { // See code:ILStubCache.CreateNewMethodDesc for the code that sets flags on stub MDs PCODE stubIP = GetIP(pContext); *pRetAddr = (BYTE *)StubManagerHelpers::GetReturnAddress(pContext); #ifdef FEATURE_MULTICASTSTUB_AS_IL if (stubIP == GetEEFuncEntryPoint(StubHelpers::MulticastDebuggerTraceHelper)) { stubIP = (PCODE)*pRetAddr; *pRetAddr = (BYTE*)StubManagerHelpers::GetRetAddrFromMulticastILStubFrame(pContext); } #endif DynamicMethodDesc *pStubMD = Entry2MethodDesc(stubIP, NULL)->AsDynamicMethodDesc(); TADDR arg = StubManagerHelpers::GetHiddenArg(pContext); Object * pThis = StubManagerHelpers::GetThisPtr(pContext); LOG((LF_CORDB, LL_INFO1000, "ILSM::TraceManager: Enter: StubMD 0x%p, HiddenArg 0x%p, ThisPtr 0x%p\n", pStubMD, arg, pThis)); // See code:ILStubCache.CreateNewMethodDesc for the code that sets flags on stub MDs PCODE target = NULL; #ifdef FEATURE_MULTICASTSTUB_AS_IL if (pStubMD->IsMulticastStub()) { _ASSERTE(GetIP(pContext) == GetEEFuncEntryPoint(StubHelpers::MulticastDebuggerTraceHelper)); int delegateCount = (int)StubManagerHelpers::GetSecondArg(pContext); int totalDelegateCount = (int)*(size_t*)((BYTE*)pThis + DelegateObject::GetOffsetOfInvocationCount()); if (delegateCount == totalDelegateCount) { LOG((LF_CORDB, LL_INFO1000, "ILSM::TraceManager: Fired all delegates\n")); return FALSE; } else { // We're going to execute stub delegateCount next, so go and grab it. 
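            // (Descriptive note, not in the original source: _invocationList is a managed array of
            // delegate objects; the code below indexes element 'delegateCount' by scaling the
            // array's component size and then traces that delegate like any other delegate object.)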
BYTE *pbDelInvocationList = *(BYTE **)((BYTE*)pThis + DelegateObject::GetOffsetOfInvocationList()); BYTE* pbDel = *(BYTE**)( ((ArrayBase *)pbDelInvocationList)->GetDataPtr() + ((ArrayBase *)pbDelInvocationList)->GetComponentSize()*delegateCount); _ASSERTE(pbDel); return DelegateInvokeStubManager::TraceDelegateObject(pbDel, trace); } } else #endif // FEATURE_MULTICASTSTUB_AS_IL if (pStubMD->IsReverseStub()) { if (pStubMD->IsStatic()) { // This is reverse P/Invoke stub, the argument is UMEntryThunk UMEntryThunk *pEntryThunk = (UMEntryThunk *)arg; target = pEntryThunk->GetManagedTarget(); LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: Reverse P/Invoke case 0x%p\n", target)); } else { // This is COM-to-CLR stub, the argument is the target target = (PCODE)arg; LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: COM-to-CLR case 0x%p\n", target)); } trace->InitForManaged(target); } else if (pStubMD->HasFlags(DynamicMethodDesc::FlagIsDelegate)) { // This is forward delegate P/Invoke stub, the argument is undefined DelegateObject *pDel = (DelegateObject *)pThis; target = pDel->GetMethodPtrAux(); LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: Forward delegate P/Invoke case 0x%p\n", target)); trace->InitForUnmanaged(target); } else if (pStubMD->HasFlags(DynamicMethodDesc::FlagIsCALLI)) { // This is unmanaged CALLI stub, the argument is the target target = (PCODE)arg; // The value is mangled on 64-bit #ifdef TARGET_AMD64 target = target >> 1; // call target is encoded as (addr << 1) | 1 #endif // TARGET_AMD64 LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: Unmanaged CALLI case 0x%p\n", target)); trace->InitForUnmanaged(target); } else if (pStubMD->IsStepThroughStub()) { MethodDesc* pTargetMD = pStubMD->GetILStubResolver()->GetStubTargetMethodDesc(); if (pTargetMD == NULL) { LOG((LF_CORDB, LL_INFO1000, "ILSM::TraceManager: Stub has no target to step through to\n")); return FALSE; } LOG((LF_CORDB, LL_INFO1000, "ILSM::TraceManager: Step through to target - 0x%p\n", pTargetMD)); target = GetStubTarget(pTargetMD); if (target == NULL) return FALSE; trace->InitForManaged(target); } else if (pStubMD->HasMDContextArg()) { LOG((LF_CORDB, LL_INFO1000, "ILSM::TraceManager: Hidden argument is MethodDesc\n")); // This is either direct forward P/Invoke or a CLR-to-COM call, the argument is MD MethodDesc *pMD = (MethodDesc *)arg; if (pMD->IsNDirect()) { target = (PCODE)((NDirectMethodDesc *)pMD)->GetNativeNDirectTarget(); LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: Forward P/Invoke case 0x%p\n", target)); trace->InitForUnmanaged(target); } #ifdef FEATURE_COMINTEROP else { LOG((LF_CORDB, LL_INFO1000, "ILSM::TraceManager: Stub is CLR-to-COM\n")); _ASSERTE(pMD->IsComPlusCall()); ComPlusCallMethodDesc *pCMD = (ComPlusCallMethodDesc *)pMD; _ASSERTE(!pCMD->IsStatic() && !pCMD->IsCtor() && "Static methods and constructors are not supported for built-in classic COM"); if (pThis != NULL) { target = GetCOMTarget(pThis, pCMD->m_pComPlusCallInfo); LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: CLR-to-COM case 0x%p\n", target)); trace->InitForUnmanaged(target); } } #endif // FEATURE_COMINTEROP } else { LOG((LF_CORDB, LL_INFO1000, "ILSM::TraceManager: No known target, IL Stub is a leaf\n")); // There's no "target" so we have nowhere to tell the debugger to move the breakpoint. return FALSE; } return TRUE; } #endif //!DACCESS_COMPILE // This is used to recognize GenericComPlusCallStub, VarargPInvokeStub, and GenericPInvokeCalliHelper. 
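// (Descriptive note, not in the original source: these three are shared assembly helpers - the
// generic CLR-to-COM call stub, the vararg P/Invoke stub, and the unmanaged calli helper - so the
// manager below recognizes them by direct address comparison instead of a range list.)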
#ifndef DACCESS_COMPILE /* static */ void InteropDispatchStubManager::Init() { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END StubManager::AddStubManager(new InteropDispatchStubManager()); } #endif // #ifndef DACCESS_COMPILE PCODE TheGenericComplusCallStub(); // clrtocom.cpp #ifndef DACCESS_COMPILE static BOOL IsVarargPInvokeStub(PCODE stubStartAddress) { LIMITED_METHOD_CONTRACT; if (stubStartAddress == GetEEFuncEntryPoint(VarargPInvokeStub)) return TRUE; #if !defined(TARGET_X86) && !defined(TARGET_ARM64) if (stubStartAddress == GetEEFuncEntryPoint(VarargPInvokeStub_RetBuffArg)) return TRUE; #endif return FALSE; } #endif // #ifndef DACCESS_COMPILE BOOL InteropDispatchStubManager::CheckIsStub_Internal(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; //@dbgtodo dharvey implement DAC suport #ifndef DACCESS_COMPILE #ifdef FEATURE_COMINTEROP if (stubStartAddress == GetEEFuncEntryPoint(GenericComPlusCallStub)) { return true; } #endif // FEATURE_COMINTEROP if (IsVarargPInvokeStub(stubStartAddress)) { return true; } if (stubStartAddress == GetEEFuncEntryPoint(GenericPInvokeCalliHelper)) { return true; } #endif // !DACCESS_COMPILE return false; } BOOL InteropDispatchStubManager::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace) { LIMITED_METHOD_CONTRACT; LOG((LF_CORDB, LL_EVERYTHING, "InteropDispatchStubManager::DoTraceStub called\n")); #ifndef DACCESS_COMPILE _ASSERTE(CheckIsStub_Internal(stubStartAddress)); trace->InitForManagerPush(stubStartAddress, this); LOG_TRACE_DESTINATION(trace, stubStartAddress, "InteropDispatchStubManager::DoTraceStub"); return TRUE; #else // !DACCESS_COMPILE trace->InitForOther(NULL); return FALSE; #endif // !DACCESS_COMPILE } #ifndef DACCESS_COMPILE BOOL InteropDispatchStubManager::TraceManager(Thread *thread, TraceDestination *trace, T_CONTEXT *pContext, BYTE **pRetAddr) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; *pRetAddr = (BYTE *)StubManagerHelpers::GetReturnAddress(pContext); TADDR arg = StubManagerHelpers::GetHiddenArg(pContext); // IL stub may not exist at this point so we init directly for the target (TODO?) 
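    // (Descriptive note, not in the original source: the hidden argument fetched above carries an
    // NDirectMethodDesc for the vararg P/Invoke helper and the raw call target for the calli
    // helper, while the CLR-to-COM path below receives a ComPlusCallMethodDesc instead.)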
if (IsVarargPInvokeStub(GetIP(pContext))) { NDirectMethodDesc *pNMD = (NDirectMethodDesc *)arg; _ASSERTE(pNMD->IsNDirect()); PCODE target = (PCODE)pNMD->GetNDirectTarget(); LOG((LF_CORDB, LL_INFO10000, "IDSM::TraceManager: Vararg P/Invoke case 0x%p\n", target)); trace->InitForUnmanaged(target); } else if (GetIP(pContext) == GetEEFuncEntryPoint(GenericPInvokeCalliHelper)) { PCODE target = (PCODE)arg; LOG((LF_CORDB, LL_INFO10000, "IDSM::TraceManager: Unmanaged CALLI case 0x%p\n", target)); trace->InitForUnmanaged(target); } #ifdef FEATURE_COMINTEROP else { ComPlusCallMethodDesc *pCMD = (ComPlusCallMethodDesc *)arg; _ASSERTE(pCMD->IsComPlusCall()); Object * pThis = StubManagerHelpers::GetThisPtr(pContext); { if (!pCMD->m_pComPlusCallInfo->m_pInterfaceMT->IsComEventItfType() && (pCMD->m_pComPlusCallInfo->m_pILStub != NULL)) { // Early-bound CLR->COM call - continue in the IL stub trace->InitForStub(pCMD->m_pComPlusCallInfo->m_pILStub); } else { // Late-bound CLR->COM call - continue in target's IDispatch::Invoke OBJECTREF oref = ObjectToOBJECTREF(pThis); GCPROTECT_BEGIN(oref); MethodTable *pItfMT = pCMD->m_pComPlusCallInfo->m_pInterfaceMT; _ASSERTE(pItfMT->GetComInterfaceType() == ifDispatch); SafeComHolder<IUnknown> pUnk = ComObject::GetComIPFromRCWThrowing(&oref, pItfMT); LPVOID *lpVtbl = *(LPVOID **)(IUnknown *)pUnk; PCODE target = (PCODE)lpVtbl[6]; // DISPATCH_INVOKE_SLOT; LOG((LF_CORDB, LL_INFO10000, "IDSM::TraceManager: CLR-to-COM late-bound case 0x%p\n", target)); trace->InitForUnmanaged(target); GCPROTECT_END(); } } } #endif // FEATURE_COMINTEROP return TRUE; } #endif //!DACCESS_COMPILE // // Since we don't generate delegate invoke stubs at runtime on IA64, we // can't use the StubLinkStubManager for these stubs. Instead, we create // an additional DelegateInvokeStubManager instead. // SPTR_IMPL(DelegateInvokeStubManager, DelegateInvokeStubManager, g_pManager); #ifndef DACCESS_COMPILE // static void DelegateInvokeStubManager::Init() { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END g_pManager = new DelegateInvokeStubManager(); StubManager::AddStubManager(g_pManager); } BOOL DelegateInvokeStubManager::AddStub(Stub* pStub) { WRAPPER_NO_CONTRACT; PCODE start = pStub->GetEntryPoint(); // We don't really care about the size here. We only stop in these stubs at the first instruction, // so we'll never be asked to claim an address in the middle of a stub. return GetRangeList()->AddRange((BYTE *)start, (BYTE *)start + 1, (LPVOID)start); } void DelegateInvokeStubManager::RemoveStub(Stub* pStub) { WRAPPER_NO_CONTRACT; PCODE start = pStub->GetEntryPoint(); // We don't really care about the size here. We only stop in these stubs at the first instruction, // so we'll never be asked to claim an address in the middle of a stub. 
GetRangeList()->RemoveRanges((LPVOID)start); } #endif BOOL DelegateInvokeStubManager::CheckIsStub_Internal(PCODE stubStartAddress) { LIMITED_METHOD_DAC_CONTRACT; bool fIsStub = false; #ifndef DACCESS_COMPILE #ifndef TARGET_X86 fIsStub = fIsStub || (stubStartAddress == GetEEFuncEntryPoint(SinglecastDelegateInvokeStub)); #endif #endif // !DACCESS_COMPILE fIsStub = fIsStub || GetRangeList()->IsInRange(stubStartAddress); return fIsStub; } BOOL DelegateInvokeStubManager::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace) { LIMITED_METHOD_CONTRACT; LOG((LF_CORDB, LL_EVERYTHING, "DelegateInvokeStubManager::DoTraceStub called\n")); _ASSERTE(CheckIsStub_Internal(stubStartAddress)); // If it's a MC delegate, then we want to set a BP & do a context-ful // manager push, so that we can figure out if this call will be to a // single multicast delegate or a multi multicast delegate trace->InitForManagerPush(stubStartAddress, this); LOG_TRACE_DESTINATION(trace, stubStartAddress, "DelegateInvokeStubManager::DoTraceStub"); return TRUE; } #if !defined(DACCESS_COMPILE) BOOL DelegateInvokeStubManager::TraceManager(Thread *thread, TraceDestination *trace, T_CONTEXT *pContext, BYTE **pRetAddr) { CONTRACTL { MODE_COOPERATIVE; } CONTRACTL_END; PCODE destAddr; PCODE pc; pc = ::GetIP(pContext); BYTE* pThis; pThis = NULL; // Retrieve the this pointer from the context. #if defined(TARGET_X86) (*pRetAddr) = *(BYTE **)(size_t)(pContext->Esp); pThis = (BYTE*)(size_t)(pContext->Ecx); destAddr = *(PCODE*)(pThis + DelegateObject::GetOffsetOfMethodPtrAux()); #elif defined(TARGET_AMD64) // <TODO> // We need to check whether the following is the correct return address. // </TODO> (*pRetAddr) = *(BYTE **)(size_t)(pContext->Rsp); LOG((LF_CORDB, LL_INFO10000, "DISM:TM at 0x%p, retAddr is 0x%p\n", pc, (*pRetAddr))); DELEGATEREF orDelegate; if (GetEEFuncEntryPoint(SinglecastDelegateInvokeStub) == pc) { LOG((LF_CORDB, LL_INFO10000, "DISM::TraceManager: isSingle\n")); orDelegate = (DELEGATEREF)ObjectToOBJECTREF(StubManagerHelpers::GetThisPtr(pContext)); // _methodPtr is where we are going to next. However, in ngen cases, we may have a shuffle thunk // burned into the ngen image, in which case the shuffle thunk is not added to the range list of // the DelegateInvokeStubManager. So we use _methodPtrAux as a fallback. destAddr = orDelegate->GetMethodPtr(); if (StubManager::TraceStub(destAddr, trace)) { LOG((LF_CORDB,LL_INFO10000, "DISM::TM: ppbDest: 0x%p\n", destAddr)); LOG((LF_CORDB,LL_INFO10000, "DISM::TM: res: 1, result type: %d\n", trace->GetTraceType())); return TRUE; } } else { // We get here if we are stopped at the beginning of a shuffle thunk. // The next address we are going to is _methodPtrAux. Stub* pStub = Stub::RecoverStub(pc); // We use the patch offset field to indicate whether the stub has a hidden return buffer argument. // This field is set in SetupShuffleThunk(). if (pStub->GetPatchOffset() != 0) { // This stub has a hidden return buffer argument. orDelegate = (DELEGATEREF)ObjectToOBJECTREF(StubManagerHelpers::GetSecondArg(pContext)); } else { orDelegate = (DELEGATEREF)ObjectToOBJECTREF(StubManagerHelpers::GetThisPtr(pContext)); } } destAddr = orDelegate->GetMethodPtrAux(); #elif defined(TARGET_ARM) (*pRetAddr) = (BYTE *)(size_t)(pContext->Lr); pThis = (BYTE*)(size_t)(pContext->R0); // Could be in the singlecast invoke stub (in which case the next destination is in _methodPtr) or a // shuffle thunk (destination in _methodPtrAux). 
int offsetOfNextDest; if (pc == GetEEFuncEntryPoint(SinglecastDelegateInvokeStub)) offsetOfNextDest = DelegateObject::GetOffsetOfMethodPtr(); else offsetOfNextDest = DelegateObject::GetOffsetOfMethodPtrAux(); destAddr = *(PCODE*)(pThis + offsetOfNextDest); #elif defined(TARGET_ARM64) (*pRetAddr) = (BYTE *)(size_t)(pContext->Lr); pThis = (BYTE*)(size_t)(pContext->X0); // Could be in the singlecast invoke stub (in which case the next destination is in _methodPtr) or a // shuffle thunk (destination in _methodPtrAux). int offsetOfNextDest; if (pc == GetEEFuncEntryPoint(SinglecastDelegateInvokeStub)) offsetOfNextDest = DelegateObject::GetOffsetOfMethodPtr(); else offsetOfNextDest = DelegateObject::GetOffsetOfMethodPtrAux(); destAddr = *(PCODE*)(pThis + offsetOfNextDest); #else PORTABILITY_ASSERT("DelegateInvokeStubManager::TraceManager"); destAddr = NULL; #endif LOG((LF_CORDB,LL_INFO10000, "DISM::TM: ppbDest: 0x%p\n", destAddr)); BOOL res = StubManager::TraceStub(destAddr, trace); LOG((LF_CORDB,LL_INFO10000, "DISM::TM: res: %d, result type: %d\n", res, trace->GetTraceType())); return res; } // static BOOL DelegateInvokeStubManager::TraceDelegateObject(BYTE* pbDel, TraceDestination *trace) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; BYTE **ppbDest = NULL; // If we got here, then we're here b/c we're at the start of a delegate stub // need to figure out the kind of delegates we are dealing with BYTE *pbDelInvocationList = *(BYTE **)(pbDel + DelegateObject::GetOffsetOfInvocationList()); LOG((LF_CORDB,LL_INFO10000, "DISM::TMI: invocationList: 0x%p\n", pbDelInvocationList)); if (pbDelInvocationList == NULL) { // null invocationList can be one of the following: // Instance closed, Instance open non-virt, Instance open virtual, Static closed, Static opened, Unmanaged FtnPtr // Instance open virtual is complex and we need to figure out what to do (TODO). 
// For the others the logic is the following: // if _methodPtrAux is 0 the target is in _methodPtr, otherwise the target is in _methodPtrAux ppbDest = (BYTE **)(pbDel + DelegateObject::GetOffsetOfMethodPtrAux()); if (*ppbDest == NULL) { ppbDest = (BYTE **)(pbDel + DelegateObject::GetOffsetOfMethodPtr()); if (*ppbDest == NULL) { // it's not looking good, bail out LOG((LF_CORDB,LL_INFO10000, "DISM(DelegateStub)::TM: can't trace into it\n")); return FALSE; } } LOG((LF_CORDB,LL_INFO10000, "DISM(DelegateStub)::TM: ppbDest: 0x%p *ppbDest:0x%p\n", ppbDest, *ppbDest)); BOOL res = StubManager::TraceStub((PCODE) (*ppbDest), trace); LOG((LF_CORDB,LL_INFO10000, "DISM(MCDel)::TM: res: %d, result type: %d\n", res, trace->GetTraceType())); return res; } // invocationList is not null, so it can be one of the following: // Multicast, Static closed (special sig), Secure // rule out the static with special sig BYTE *pbCount = *(BYTE **)(pbDel + DelegateObject::GetOffsetOfInvocationCount()); if (!pbCount) { // it's a static closed, the target lives in _methodPtrAux ppbDest = (BYTE **)(pbDel + DelegateObject::GetOffsetOfMethodPtrAux()); if (*ppbDest == NULL) { // it's not looking good, bail out LOG((LF_CORDB,LL_INFO10000, "DISM(DelegateStub)::TM: can't trace into it\n")); return FALSE; } LOG((LF_CORDB,LL_INFO10000, "DISM(DelegateStub)::TM: ppbDest: 0x%p *ppbDest:0x%p\n", ppbDest, *ppbDest)); BOOL res = StubManager::TraceStub((PCODE) (*ppbDest), trace); LOG((LF_CORDB,LL_INFO10000, "DISM(MCDel)::TM: res: %d, result type: %d\n", res, trace->GetTraceType())); return res; } MethodTable *pType = *(MethodTable**)pbDelInvocationList; if (pType->IsDelegate()) { // this is a secure delegate. The target is hidden inside this field, so recurse in and pray... return TraceDelegateObject(pbDelInvocationList, trace); } // Otherwise, we're going for the first invoke of the multi case. // In order to go to the correct spot, we just have to fish out // slot 0 of the invocation list, and figure out where that's going to, // then put a breakpoint there... pbDel = *(BYTE**)(((ArrayBase *)pbDelInvocationList)->GetDataPtr()); return TraceDelegateObject(pbDel, trace); } #endif // DACCESS_COMPILE #if defined(TARGET_X86) && !defined(UNIX_X86_ABI) #if !defined(DACCESS_COMPILE) // static void TailCallStubManager::Init() { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END StubManager::AddStubManager(new TailCallStubManager()); } bool TailCallStubManager::IsTailCallJitHelper(PCODE code) { LIMITED_METHOD_CONTRACT; return code == GetEEFuncEntryPoint(JIT_TailCall); } #endif // !DACCESS_COMPILE BOOL TailCallStubManager::CheckIsStub_Internal(PCODE stubStartAddress) { LIMITED_METHOD_DAC_CONTRACT; bool fIsStub = false; #if !defined(DACCESS_COMPILE) fIsStub = IsTailCallJitHelper(stubStartAddress); #endif // !DACCESS_COMPILE return fIsStub; } #if !defined(DACCESS_COMPILE) EXTERN_C void STDCALL JIT_TailCallLeave(); EXTERN_C void STDCALL JIT_TailCallVSDLeave(); BOOL TailCallStubManager::TraceManager(Thread * pThread, TraceDestination * pTrace, T_CONTEXT * pContext, BYTE ** ppRetAddr) { WRAPPER_NO_CONTRACT; TADDR esp = GetSP(pContext); TADDR ebp = GetFP(pContext); // Check if we are stopped at the beginning of JIT_TailCall(). if (GetIP(pContext) == GetEEFuncEntryPoint(JIT_TailCall)) { // There are two cases in JIT_TailCall(). The first one is a normal tail call. // The second one is a tail call to a virtual method. *ppRetAddr = *(reinterpret_cast<BYTE **>(ebp + sizeof(SIZE_T))); // Check whether this is a VSD tail call.
SIZE_T flags = *(reinterpret_cast<SIZE_T *>(esp + JIT_TailCall_StackOffsetToFlags)); if (flags & 0x2) { // This is a VSD tail call. pTrace->InitForManagerPush(GetEEFuncEntryPoint(JIT_TailCallVSDLeave), this); return TRUE; } else { // This is not a VSD tail call. pTrace->InitForManagerPush(GetEEFuncEntryPoint(JIT_TailCallLeave), this); return TRUE; } } else { if (GetIP(pContext) == GetEEFuncEntryPoint(JIT_TailCallLeave)) { // This is the simple case. The tail call goes directly to the target. There won't be an // explicit frame on the stack. We should be right at the return instruction which branches to // the call target. The return address is stored in the second leafmost stack slot. *ppRetAddr = *(reinterpret_cast<BYTE **>(esp + sizeof(SIZE_T))); } else { _ASSERTE(GetIP(pContext) == GetEEFuncEntryPoint(JIT_TailCallVSDLeave)); // This is the VSD case. The tail call goes through a assembly helper function which sets up // and tears down an explicit frame. In this case, the return address is at the same place // as on entry to JIT_TailCall(). *ppRetAddr = *(reinterpret_cast<BYTE **>(ebp + sizeof(SIZE_T))); } // In both cases, the target address is stored in the leafmost stack slot. pTrace->InitForStub((PCODE)*reinterpret_cast<SIZE_T *>(esp)); return TRUE; } } #endif // !DACCESS_COMPILE BOOL TailCallStubManager::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace) { WRAPPER_NO_CONTRACT; LOG((LF_CORDB, LL_EVERYTHING, "TailCallStubManager::DoTraceStub called\n")); BOOL fResult = FALSE; // Make sure we are stopped at the beginning of JIT_TailCall(). _ASSERTE(CheckIsStub_Internal(stubStartAddress)); trace->InitForManagerPush(stubStartAddress, this); fResult = TRUE; LOG_TRACE_DESTINATION(trace, stubStartAddress, "TailCallStubManager::DoTraceStub"); return fResult; } #endif // TARGET_X86 && !UNIX_X86_ABI #ifdef DACCESS_COMPILE void PrecodeStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags) { SUPPORTS_DAC; WRAPPER_NO_CONTRACT; DAC_ENUM_VTHIS(); EMEM_OUT(("MEM: %p PrecodeStubManager\n", dac_cast<TADDR>(this))); GetStubPrecodeRangeList()->EnumMemoryRegions(flags); GetFixupPrecodeRangeList()->EnumMemoryRegions(flags); } void StubLinkStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags) { SUPPORTS_DAC; WRAPPER_NO_CONTRACT; DAC_ENUM_VTHIS(); EMEM_OUT(("MEM: %p StubLinkStubManager\n", dac_cast<TADDR>(this))); GetRangeList()->EnumMemoryRegions(flags); } void ThunkHeapStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags) { SUPPORTS_DAC; WRAPPER_NO_CONTRACT; DAC_ENUM_VTHIS(); EMEM_OUT(("MEM: %p ThunkHeapStubManager\n", dac_cast<TADDR>(this))); GetRangeList()->EnumMemoryRegions(flags); } void JumpStubStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags) { SUPPORTS_DAC; WRAPPER_NO_CONTRACT; DAC_ENUM_VTHIS(); EMEM_OUT(("MEM: %p JumpStubStubManager\n", dac_cast<TADDR>(this))); } void RangeSectionStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags) { SUPPORTS_DAC; WRAPPER_NO_CONTRACT; DAC_ENUM_VTHIS(); EMEM_OUT(("MEM: %p RangeSectionStubManager\n", dac_cast<TADDR>(this))); } void ILStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags) { SUPPORTS_DAC; WRAPPER_NO_CONTRACT; DAC_ENUM_VTHIS(); EMEM_OUT(("MEM: %p ILStubManager\n", dac_cast<TADDR>(this))); } void InteropDispatchStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags) { SUPPORTS_DAC; WRAPPER_NO_CONTRACT; DAC_ENUM_VTHIS(); EMEM_OUT(("MEM: %p InteropDispatchStubManager\n", dac_cast<TADDR>(this))); } void DelegateInvokeStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags) { 
SUPPORTS_DAC; WRAPPER_NO_CONTRACT; DAC_ENUM_VTHIS(); EMEM_OUT(("MEM: %p DelegateInvokeStubManager\n", dac_cast<TADDR>(this))); GetRangeList()->EnumMemoryRegions(flags); } void VirtualCallStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags) { SUPPORTS_DAC; WRAPPER_NO_CONTRACT; DAC_ENUM_VTHIS(); EMEM_OUT(("MEM: %p VirtualCallStubManager\n", dac_cast<TADDR>(this))); GetLookupRangeList()->EnumMemoryRegions(flags); GetResolveRangeList()->EnumMemoryRegions(flags); GetDispatchRangeList()->EnumMemoryRegions(flags); GetCacheEntryRangeList()->EnumMemoryRegions(flags); } #if defined(TARGET_X86) && !defined(UNIX_X86_ABI) void TailCallStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags) { SUPPORTS_DAC; WRAPPER_NO_CONTRACT; DAC_ENUM_VTHIS(); EMEM_OUT(("MEM: %p TailCallStubManager\n", dac_cast<TADDR>(this))); } #endif #endif // #ifdef DACCESS_COMPILE
-1
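The stub-manager code above follows a recurring pattern: a manager registers stub entry points in a range list, `CheckIsStub` tests membership, `DoTraceStub` asks the debugger to stop at the stub via a "manager push", and `TraceManager` then reads the stopped context to recover the real call target. Below is a minimal, hypothetical C++ sketch of that pattern; `SimpleRangeList`, `TraceKind`, and `ToyStubManager` are invented names for illustration only and are not the runtime's actual classes.

```cpp
// Hypothetical sketch of the stub-manager tracing pattern (not runtime code).
#include <cstdint>
#include <cstddef>
#include <map>

using PCODE = std::uintptr_t;

// Tracks [start, end) address ranges of known stubs, in the spirit of the runtime's range lists.
class SimpleRangeList {
    std::map<PCODE, PCODE> ranges_;
public:
    void Add(PCODE start, PCODE end) { ranges_[start] = end; }
    bool Contains(PCODE pc) const {
        auto it = ranges_.upper_bound(pc);
        if (it == ranges_.begin()) return false;
        --it;
        return pc >= it->first && pc < it->second;
    }
};

enum class TraceKind { Unmanaged, Stub, ManagerPush };
struct Trace { TraceKind kind; PCODE address; };

class ToyStubManager {
    SimpleRangeList stubs_;
public:
    // Only the first instruction matters: we never claim an address mid-stub.
    void AddStub(PCODE entry) { stubs_.Add(entry, entry + 1); }

    bool CheckIsStub(PCODE pc) const { return stubs_.Contains(pc); }

    // Phase 1: no register context yet, so ask to be called back once stopped at the stub.
    bool DoTraceStub(PCODE pc, Trace* trace) {
        if (!CheckIsStub(pc)) return false;
        *trace = { TraceKind::ManagerPush, pc };
        return true;
    }

    // Phase 2: with a (simulated) "this" pointer available, fish the target out of the
    // object, much like the delegate tracing code reads _methodPtr / _methodPtrAux.
    bool TraceManager(const unsigned char* thisPtr, std::size_t targetFieldOffset, Trace* trace) {
        PCODE target = *reinterpret_cast<const PCODE*>(thisPtr + targetFieldOffset);
        *trace = { TraceKind::Stub, target };
        return true;
    }
};
```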
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/coreclr/classlibnative/float/floatsingle.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // File: FloatSingle.cpp // #include <common.h> #include "floatsingle.h" // The default compilation mode is /fp:precise, which disables floating-point intrinsics. This // default compilation mode has previously caused performance regressions in floating-point code. // We enable /fp:fast semantics for the majority of the math functions, as it will speed up performance // and is really unlikely to cause any other code regressions. //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// /// /// beginning of /fp:fast scope /// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// #ifdef _MSC_VER #pragma float_control(push) #pragma float_control(precise, off) #endif /*=====================================Acos===================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Acos, float x) FCALL_CONTRACT; return acosf(x); FCIMPLEND /*=====================================Acosh==================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Acosh, float x) FCALL_CONTRACT; return acoshf(x); FCIMPLEND /*=====================================Asin===================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Asin, float x) FCALL_CONTRACT; return asinf(x); FCIMPLEND /*=====================================Asinh==================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Asinh, float x) FCALL_CONTRACT; return asinhf(x); FCIMPLEND /*=====================================Atan===================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Atan, float x) FCALL_CONTRACT; return atanf(x); FCIMPLEND /*=====================================Atanh==================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Atanh, float x) FCALL_CONTRACT; return atanhf(x); FCIMPLEND /*=====================================Atan2==================================== ** ==============================================================================*/ FCIMPL2_VV(float, COMSingle::Atan2, float y, float x) FCALL_CONTRACT; return atan2f(y, x); FCIMPLEND /*====================================Cbrt====================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Cbrt, float x) FCALL_CONTRACT; return cbrtf(x); FCIMPLEND #if defined(_MSC_VER) && defined(TARGET_AMD64) // The /fp:fast form of `ceilf` for AMD64 does not correctly handle: `-1.0 < value <= -0.0` // https://github.com/dotnet/runtime/issues/11003 #pragma float_control(push) #pragma float_control(precise, on) #endif 
/*====================================Ceil====================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Ceil, float x) FCALL_CONTRACT; return ceilf(x); FCIMPLEND #if defined(_MSC_VER) && defined(TARGET_AMD64) #pragma float_control(pop) #endif /*=====================================Cos====================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Cos, float x) FCALL_CONTRACT; return cosf(x); FCIMPLEND /*=====================================Cosh===================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Cosh, float x) FCALL_CONTRACT; return coshf(x); FCIMPLEND /*=====================================Exp====================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Exp, float x) FCALL_CONTRACT; return expf(x); FCIMPLEND /*====================================Floor===================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Floor, float x) FCALL_CONTRACT; return floorf(x); FCIMPLEND /*=====================================FMod===================================== ** ==============================================================================*/ FCIMPL2_VV(float, COMSingle::FMod, float x, float y) FCALL_CONTRACT; return fmodf(x, y); FCIMPLEND /*=====================================FusedMultiplyAdd========================== ** ==============================================================================*/ FCIMPL3_VVV(float, COMSingle::FusedMultiplyAdd, float x, float y, float z) FCALL_CONTRACT; return fmaf(x, y, z); FCIMPLEND /*=====================================Log====================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Log, float x) FCALL_CONTRACT; return logf(x); FCIMPLEND /*=====================================Log2===================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Log2, float x) FCALL_CONTRACT; return log2f(x); FCIMPLEND /*====================================Log10===================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Log10, float x) FCALL_CONTRACT; return log10f(x); FCIMPLEND /*=====================================ModF===================================== ** ==============================================================================*/ FCIMPL2_VI(float, COMSingle::ModF, float x, float* intptr) FCALL_CONTRACT; return modff(x, intptr); FCIMPLEND /*=====================================Pow====================================== ** ==============================================================================*/ FCIMPL2_VV(float, COMSingle::Pow, float x, float y) FCALL_CONTRACT; return powf(x, y); FCIMPLEND /*=====================================Sin====================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Sin, float x) FCALL_CONTRACT; return sinf(x); FCIMPLEND /*====================================SinCos==================================== ** 
==============================================================================*/ FCIMPL3_VII(void, COMSingle::SinCos, float x, float* pSin, float* pCos) FCALL_CONTRACT; #ifdef _MSC_VER *pSin = sinf(x); *pCos = cosf(x); #else sincosf(x, pSin, pCos); #endif FCIMPLEND /*=====================================Sinh===================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Sinh, float x) FCALL_CONTRACT; return sinhf(x); FCIMPLEND /*=====================================Sqrt===================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Sqrt, float x) FCALL_CONTRACT; return sqrtf(x); FCIMPLEND /*=====================================Tan====================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Tan, float x) FCALL_CONTRACT; return tanf(x); FCIMPLEND /*=====================================Tanh===================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Tanh, float x) FCALL_CONTRACT; return tanhf(x); FCIMPLEND #ifdef _MSC_VER #pragma float_control(pop) #endif //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// /// /// End of /fp:fast scope /// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // File: FloatSingle.cpp // #include <common.h> #include "floatsingle.h" // The default compilation mode is /fp:precise, which disables floating-point intrinsics. This // default compilation mode has previously caused performance regressions in floating-point code. // We enable /fp:fast semantics for the majority of the math functions, as it will speed up performance // and is really unlikely to cause any other code regressions. //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// /// /// beginning of /fp:fast scope /// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// #ifdef _MSC_VER #pragma float_control(push) #pragma float_control(precise, off) #endif /*=====================================Acos===================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Acos, float x) FCALL_CONTRACT; return acosf(x); FCIMPLEND /*=====================================Acosh==================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Acosh, float x) FCALL_CONTRACT; return acoshf(x); FCIMPLEND /*=====================================Asin===================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Asin, float x) FCALL_CONTRACT; return asinf(x); FCIMPLEND /*=====================================Asinh==================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Asinh, float x) FCALL_CONTRACT; return asinhf(x); FCIMPLEND /*=====================================Atan===================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Atan, float x) FCALL_CONTRACT; return atanf(x); FCIMPLEND /*=====================================Atanh==================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Atanh, float x) FCALL_CONTRACT; return atanhf(x); FCIMPLEND /*=====================================Atan2==================================== ** ==============================================================================*/ FCIMPL2_VV(float, COMSingle::Atan2, float y, float x) FCALL_CONTRACT; return atan2f(y, x); FCIMPLEND /*====================================Cbrt====================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Cbrt, float x) FCALL_CONTRACT; return cbrtf(x); FCIMPLEND #if defined(_MSC_VER) && defined(TARGET_AMD64) // The /fp:fast form of `ceilf` for AMD64 does not correctly handle: `-1.0 < value <= -0.0` // https://github.com/dotnet/runtime/issues/11003 #pragma float_control(push) #pragma float_control(precise, on) #endif 
/*====================================Ceil====================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Ceil, float x) FCALL_CONTRACT; return ceilf(x); FCIMPLEND #if defined(_MSC_VER) && defined(TARGET_AMD64) #pragma float_control(pop) #endif /*=====================================Cos====================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Cos, float x) FCALL_CONTRACT; return cosf(x); FCIMPLEND /*=====================================Cosh===================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Cosh, float x) FCALL_CONTRACT; return coshf(x); FCIMPLEND /*=====================================Exp====================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Exp, float x) FCALL_CONTRACT; return expf(x); FCIMPLEND /*====================================Floor===================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Floor, float x) FCALL_CONTRACT; return floorf(x); FCIMPLEND /*=====================================FMod===================================== ** ==============================================================================*/ FCIMPL2_VV(float, COMSingle::FMod, float x, float y) FCALL_CONTRACT; return fmodf(x, y); FCIMPLEND /*=====================================FusedMultiplyAdd========================== ** ==============================================================================*/ FCIMPL3_VVV(float, COMSingle::FusedMultiplyAdd, float x, float y, float z) FCALL_CONTRACT; return fmaf(x, y, z); FCIMPLEND /*=====================================Log====================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Log, float x) FCALL_CONTRACT; return logf(x); FCIMPLEND /*=====================================Log2===================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Log2, float x) FCALL_CONTRACT; return log2f(x); FCIMPLEND /*====================================Log10===================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Log10, float x) FCALL_CONTRACT; return log10f(x); FCIMPLEND /*=====================================ModF===================================== ** ==============================================================================*/ FCIMPL2_VI(float, COMSingle::ModF, float x, float* intptr) FCALL_CONTRACT; return modff(x, intptr); FCIMPLEND /*=====================================Pow====================================== ** ==============================================================================*/ FCIMPL2_VV(float, COMSingle::Pow, float x, float y) FCALL_CONTRACT; return powf(x, y); FCIMPLEND /*=====================================Sin====================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Sin, float x) FCALL_CONTRACT; return sinf(x); FCIMPLEND /*====================================SinCos==================================== ** 
==============================================================================*/ FCIMPL3_VII(void, COMSingle::SinCos, float x, float* pSin, float* pCos) FCALL_CONTRACT; #ifdef _MSC_VER *pSin = sinf(x); *pCos = cosf(x); #else sincosf(x, pSin, pCos); #endif FCIMPLEND /*=====================================Sinh===================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Sinh, float x) FCALL_CONTRACT; return sinhf(x); FCIMPLEND /*=====================================Sqrt===================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Sqrt, float x) FCALL_CONTRACT; return sqrtf(x); FCIMPLEND /*=====================================Tan====================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Tan, float x) FCALL_CONTRACT; return tanf(x); FCIMPLEND /*=====================================Tanh===================================== ** ==============================================================================*/ FCIMPL1_V(float, COMSingle::Tanh, float x) FCALL_CONTRACT; return tanhf(x); FCIMPLEND #ifdef _MSC_VER #pragma float_control(pop) #endif //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// /// /// End of /fp:fast scope /// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////
-1
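The floatsingle.cpp content above relies on MSVC's `float_control` pragmas to give most math wrappers /fp:fast semantics while restoring precise semantics around `ceilf` on AMD64, where the fast-math form mishandles `-1.0 < value <= -0.0`. A minimal sketch of that scoping technique is shown below; `FastHypot` and `PreciseCeil` are invented example functions, not part of the file above.

```cpp
// Minimal sketch (assumes MSVC) of scoping fast-math with float_control push/pop.
#include <cmath>

#ifdef _MSC_VER
#pragma float_control(push)
#pragma float_control(precise, off)   // fast-math semantics start here
#endif

float FastHypot(float x, float y)
{
    return sqrtf(x * x + y * y);      // may be reordered/contracted under fast-math
}

#ifdef _MSC_VER
// Re-enable precise semantics around a function whose fast-math form is known to misbehave.
#pragma float_control(push)
#pragma float_control(precise, on)
#endif

float PreciseCeil(float x)
{
    return ceilf(x);                  // strict rounding behavior preserved
}

#ifdef _MSC_VER
#pragma float_control(pop)            // back to fast-math for the rest of the region
#endif

#ifdef _MSC_VER
#pragma float_control(pop)            // back to the translation unit's default mode
#endif
```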
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/coreclr/md/enc/metamodelenc.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //***************************************************************************** // MetaModelENC.cpp // // // Implementation for applying ENC deltas to a MiniMd. // //***************************************************************************** #include "stdafx.h" #include <limits.h> #include <posterror.h> #include <metamodelrw.h> #include <stgio.h> #include <stgtiggerstorage.h> #include "mdlog.h" #include "rwutil.h" ULONG CMiniMdRW::m_SuppressedDeltaColumns[TBL_COUNT] = {0}; //***************************************************************************** // Copy the data from one MiniMd to another. //***************************************************************************** __checkReturn HRESULT CMiniMdRW::ApplyRecordDelta( CMiniMdRW &mdDelta, // The delta MetaData. ULONG ixTbl, // The table with the data. void *pDelta, // The delta MetaData record. void *pRecord) // The record to update. { HRESULT hr = S_OK; ULONG mask = m_SuppressedDeltaColumns[ixTbl]; for (ULONG ixCol = 0; ixCol<m_TableDefs[ixTbl].m_cCols; ++ixCol, mask >>= 1) { // Skip certain pointer columns. if (mask & 0x01) continue; ULONG val = mdDelta.GetCol(ixTbl, ixCol, pDelta); IfFailRet(PutCol(ixTbl, ixCol, pRecord, val)); } return hr; } // CMiniMdRW::ApplyRecordDelta //***************************************************************************** // Apply a delta record to a table, generically. //***************************************************************************** __checkReturn HRESULT CMiniMdRW::ApplyTableDelta( CMiniMdRW &mdDelta, // Interface to MD with the ENC delta. ULONG ixTbl, // Table index to update. RID iRid, // RID of the changed item. int fc) // Function code of update. { HRESULT hr = S_OK; void *pRec; // Record in existing MetaData. void *pDeltaRec; // Record if Delta MetaData. RID newRid; // Rid of new record. // Get the delta record. IfFailGo(mdDelta.GetDeltaRecord(ixTbl, iRid, &pDeltaRec)); // Get the record from the base metadata. if (iRid > m_Schema.m_cRecs[ixTbl]) { // Added record. Each addition is the next one. _ASSERTE(iRid == m_Schema.m_cRecs[ixTbl] + 1); switch (ixTbl) { case TBL_TypeDef: IfFailGo(AddTypeDefRecord(reinterpret_cast<TypeDefRec **>(&pRec), &newRid)); break; case TBL_Method: IfFailGo(AddMethodRecord(reinterpret_cast<MethodRec **>(&pRec), &newRid)); break; case TBL_EventMap: IfFailGo(AddEventMapRecord(reinterpret_cast<EventMapRec **>(&pRec), &newRid)); break; case TBL_PropertyMap: IfFailGo(AddPropertyMapRecord(reinterpret_cast<PropertyMapRec **>(&pRec), &newRid)); break; default: IfFailGo(AddRecord(ixTbl, &pRec, &newRid)); break; } IfNullGo(pRec); _ASSERTE(iRid == newRid); } else { // Updated record. IfFailGo(getRow(ixTbl, iRid, &pRec)); } // Copy the record info. IfFailGo(ApplyRecordDelta(mdDelta, ixTbl, pDeltaRec, pRec)); ErrExit: return hr; } // CMiniMdRW::ApplyTableDelta //***************************************************************************** // Get the record from a Delta MetaData that corresponds to the actual record. //***************************************************************************** __checkReturn HRESULT CMiniMdRW::GetDeltaRecord( ULONG ixTbl, // Table. ULONG iRid, // Record in the table. void **ppRecord) { HRESULT hr; ULONG iMap; // RID in map table. ENCMapRec *pMap; // Row in map table. *ppRecord = NULL; // If no remap, just return record directly. 
if ((m_Schema.m_cRecs[TBL_ENCMap] == 0) || (ixTbl == TBL_Module) || !IsMinimalDelta()) { return getRow(ixTbl, iRid, ppRecord); } // Use the remap table to find the physical row containing this logical row. iMap = (*m_rENCRecs)[ixTbl]; IfFailRet(GetENCMapRecord(iMap, &pMap)); // Search for desired record. while ((TblFromRecId(pMap->GetToken()) == ixTbl) && (RidFromRecId(pMap->GetToken()) < iRid)) { IfFailRet(GetENCMapRecord(++iMap, &pMap)); } _ASSERTE((TblFromRecId(pMap->GetToken()) == ixTbl) && (RidFromRecId(pMap->GetToken()) == iRid)); // Relative position within table's group in map is physical rid. iRid = iMap - (*m_rENCRecs)[ixTbl] + 1; return getRow(ixTbl, iRid, ppRecord); } // CMiniMdRW::GetDeltaRecord //***************************************************************************** // Given a MetaData with ENC changes, apply those changes to this MetaData. //***************************************************************************** __checkReturn HRESULT CMiniMdRW::ApplyHeapDeltas( CMiniMdRW &mdDelta) // Interface to MD with the ENC delta. { if (mdDelta.IsMinimalDelta()) { return ApplyHeapDeltasWithMinimalDelta(mdDelta); } else { return ApplyHeapDeltasWithFullDelta(mdDelta); } }// CMiniMdRW::ApplyHeapDeltas __checkReturn HRESULT CMiniMdRW::ApplyHeapDeltasWithMinimalDelta( CMiniMdRW &mdDelta) // Interface to MD with the ENC delta. { HRESULT hr = S_OK; // Extend the heaps with EnC minimal delta IfFailGo(m_StringHeap.AddStringHeap( &(mdDelta.m_StringHeap), 0)); // Start offset in the mdDelta IfFailGo(m_BlobHeap.AddBlobHeap( &(mdDelta.m_BlobHeap), 0)); // Start offset in the mdDelta IfFailGo(m_UserStringHeap.AddBlobHeap( &(mdDelta.m_UserStringHeap), 0)); // Start offset in the mdDelta // We never do a minimal delta with the guid heap IfFailGo(m_GuidHeap.AddGuidHeap( &(mdDelta.m_GuidHeap), m_GuidHeap.GetSize())); // Starting offset in the full delta guid heap ErrExit: return hr; } // CMiniMdRW::ApplyHeapDeltasWithMinimalDelta __checkReturn HRESULT CMiniMdRW::ApplyHeapDeltasWithFullDelta( CMiniMdRW &mdDelta) // Interface to MD with the ENC delta. { HRESULT hr = S_OK; // Extend the heaps with EnC full delta IfFailRet(m_StringHeap.AddStringHeap( &(mdDelta.m_StringHeap), m_StringHeap.GetUnalignedSize())); // Starting offset in the full delta string heap IfFailRet(m_BlobHeap.AddBlobHeap( &(mdDelta.m_BlobHeap), m_BlobHeap.GetUnalignedSize())); // Starting offset in the full delta blob heap IfFailRet(m_UserStringHeap.AddBlobHeap( &(mdDelta.m_UserStringHeap), m_UserStringHeap.GetUnalignedSize())); // Starting offset in the full delta user string heap IfFailRet(m_GuidHeap.AddGuidHeap( &(mdDelta.m_GuidHeap), m_GuidHeap.GetSize())); // Starting offset in the full delta guid heap return hr; } // CMiniMdRW::ApplyHeapDeltasWithFullDelta //***************************************************************************** // Driver for the delta process. //***************************************************************************** __checkReturn HRESULT CMiniMdRW::ApplyDelta( CMiniMdRW &mdDelta) // Interface to MD with the ENC delta. { HRESULT hr = S_OK; ULONG iENC; // Loop control. RID iRid; // RID of some record. RID iNew; // RID of a new record. int i; // Loop control. ULONG ixTbl; // A table. #ifdef _DEBUG if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_MD_ApplyDeltaBreak)) { _ASSERTE(!"CMiniMDRW::ApplyDelta()"); } #endif // _DEBUG // Init the suppressed column table. We know this one isn't zero... 
if (m_SuppressedDeltaColumns[TBL_TypeDef] == 0) { m_SuppressedDeltaColumns[TBL_EventMap] = (1 << EventMapRec::COL_EventList); m_SuppressedDeltaColumns[TBL_PropertyMap] = (1 << PropertyMapRec::COL_PropertyList); m_SuppressedDeltaColumns[TBL_EventMap] = (1 << EventMapRec::COL_EventList); m_SuppressedDeltaColumns[TBL_Method] = (1 << MethodRec::COL_ParamList); m_SuppressedDeltaColumns[TBL_TypeDef] = (1 << TypeDefRec::COL_FieldList)|(1<<TypeDefRec::COL_MethodList); } // Verify the version of the MD. if (m_Schema.m_major != mdDelta.m_Schema.m_major || m_Schema.m_minor != mdDelta.m_Schema.m_minor) { _ASSERTE(!"Version of Delta MetaData is a incompatible with current MetaData."); //<TODO>@FUTURE: unique error in the future since we are not shipping ENC.</TODO> return E_INVALIDARG; } // verify MVIDs. ModuleRec *pModDelta; ModuleRec *pModBase; IfFailGo(mdDelta.GetModuleRecord(1, &pModDelta)); IfFailGo(GetModuleRecord(1, &pModBase)); GUID GuidDelta; GUID GuidBase; IfFailGo(mdDelta.getMvidOfModule(pModDelta, &GuidDelta)); IfFailGo(getMvidOfModule(pModBase, &GuidBase)); if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_MD_DeltaCheck) && (GuidDelta != GuidBase)) { _ASSERTE(!"Delta MetaData has different base than current MetaData."); return E_INVALIDARG; } // Let the other md prepare for sparse records. IfFailGo(mdDelta.StartENCMap()); // Fix the heaps. IfFailGo(ApplyHeapDeltas(mdDelta)); // Truncate some tables in preparation to copy in new ENCLog data. for (i = 0; (ixTbl = m_TruncatedEncTables[i]) != (ULONG)-1; ++i) { m_Tables[ixTbl].Delete(); IfFailGo(m_Tables[ixTbl].InitializeEmpty_WithRecordCount( m_TableDefs[ixTbl].m_cbRec, mdDelta.m_Schema.m_cRecs[ixTbl] COMMA_INDEBUG_MD(TRUE))); // fIsReadWrite INDEBUG_MD(m_Tables[ixTbl].Debug_SetTableInfo(NULL, ixTbl)); m_Schema.m_cRecs[ixTbl] = 0; } // For each record in the ENC log... for (iENC = 1; iENC <= mdDelta.m_Schema.m_cRecs[TBL_ENCLog]; ++iENC) { // Get the record, and the updated token. ENCLogRec *pENC; IfFailGo(mdDelta.GetENCLogRecord(iENC, &pENC)); ENCLogRec *pENC2; IfFailGo(AddENCLogRecord(&pENC2, &iNew)); IfNullGo(pENC2); ENCLogRec *pENC3; _ASSERTE(iNew == iENC); ULONG func = pENC->GetFuncCode(); pENC2->SetFuncCode(pENC->GetFuncCode()); pENC2->SetToken(pENC->GetToken()); // What kind of record is this? if (IsRecId(pENC->GetToken())) { // Non-token table iRid = RidFromRecId(pENC->GetToken()); ixTbl = TblFromRecId(pENC->GetToken()); } else { // Token table. iRid = RidFromToken(pENC->GetToken()); ixTbl = GetTableForToken(pENC->GetToken()); } RID rid_Ignore; // Switch based on the function code. switch (func) { case eDeltaMethodCreate: // Next ENC record will define the new Method. MethodRec *pMethodRecord; IfFailGo(AddMethodRecord(&pMethodRecord, &rid_Ignore)); IfFailGo(AddMethodToTypeDef(iRid, m_Schema.m_cRecs[TBL_Method])); break; case eDeltaParamCreate: // Next ENC record will define the new Param. This record is // tricky because params will be re-ordered based on their sequence, // but the sequence isn't set until the NEXT record is applied. // So, for ParamCreate only, apply the param record delta before // adding the parent-child linkage. ParamRec *pParamRecord; IfFailGo(AddParamRecord(&pParamRecord, &rid_Ignore)); // Should have recorded a Param delta after the Param add. 
_ASSERTE(iENC<mdDelta.m_Schema.m_cRecs[TBL_ENCLog]); IfFailGo(mdDelta.GetENCLogRecord(iENC+1, &pENC3)); _ASSERTE(pENC3->GetFuncCode() == 0); _ASSERTE(GetTableForToken(pENC3->GetToken()) == TBL_Param); IfFailGo(ApplyTableDelta(mdDelta, TBL_Param, RidFromToken(pENC3->GetToken()), eDeltaFuncDefault)); // Now that Param record is OK, set up linkage. IfFailGo(AddParamToMethod(iRid, m_Schema.m_cRecs[TBL_Param])); break; case eDeltaFieldCreate: // Next ENC record will define the new Field. FieldRec *pFieldRecord; IfFailGo(AddFieldRecord(&pFieldRecord, &rid_Ignore)); IfFailGo(AddFieldToTypeDef(iRid, m_Schema.m_cRecs[TBL_Field])); break; case eDeltaPropertyCreate: // Next ENC record will define the new Property. PropertyRec *pPropertyRecord; IfFailGo(AddPropertyRecord(&pPropertyRecord, &rid_Ignore)); IfFailGo(AddPropertyToPropertyMap(iRid, m_Schema.m_cRecs[TBL_Property])); break; case eDeltaEventCreate: // Next ENC record will define the new Event. EventRec *pEventRecord; IfFailGo(AddEventRecord(&pEventRecord, &rid_Ignore)); IfFailGo(AddEventToEventMap(iRid, m_Schema.m_cRecs[TBL_Event])); break; case eDeltaFuncDefault: IfFailGo(ApplyTableDelta(mdDelta, ixTbl, iRid, func)); break; default: _ASSERTE(!"Unexpected function in ApplyDelta"); IfFailGo(E_UNEXPECTED); break; } } m_Schema.m_cRecs[TBL_ENCLog] = mdDelta.m_Schema.m_cRecs[TBL_ENCLog]; ErrExit: // Store the result for returning (IfFailRet will modify hr) HRESULT hrReturn = hr; IfFailRet(mdDelta.EndENCMap()); return hrReturn; } // CMiniMdRW::ApplyDelta //***************************************************************************** //***************************************************************************** __checkReturn HRESULT CMiniMdRW::StartENCMap() // S_OK or error. { HRESULT hr = S_OK; ULONG iENC; // Loop control. ULONG ixTbl; // A table. int ixTblPrev = -1; // Table previously seen. _ASSERTE(m_rENCRecs == 0); if (m_Schema.m_cRecs[TBL_ENCMap] == 0) return S_OK; // Build an array of pointers into the ENCMap table for fast access to the ENCMap // for each table. m_rENCRecs = new (nothrow) ULONGARRAY; IfNullGo(m_rENCRecs); if (!m_rENCRecs->AllocateBlock(TBL_COUNT)) IfFailGo(E_OUTOFMEMORY); for (iENC = 1; iENC <= m_Schema.m_cRecs[TBL_ENCMap]; ++iENC) { ENCMapRec *pMap; IfFailGo(GetENCMapRecord(iENC, &pMap)); ixTbl = TblFromRecId(pMap->GetToken()); _ASSERTE((int)ixTbl >= ixTblPrev); _ASSERTE(ixTbl < TBL_COUNT); _ASSERTE(ixTbl != TBL_ENCMap); _ASSERTE(ixTbl != TBL_ENCLog); if ((int)ixTbl == ixTblPrev) continue; // Catch up on any skipped tables. while (ixTblPrev < (int)ixTbl) { (*m_rENCRecs)[++ixTblPrev] = iENC; } } while (ixTblPrev < TBL_COUNT-1) { (*m_rENCRecs)[++ixTblPrev] = iENC; } ErrExit: return hr; } // CMiniMdRW::StartENCMap //***************************************************************************** //***************************************************************************** __checkReturn HRESULT CMiniMdRW::EndENCMap() { if (m_rENCRecs != NULL) { delete m_rENCRecs; m_rENCRecs = NULL; } return S_OK; } // CMiniMdRW::EndENCMap
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //***************************************************************************** // MetaModelENC.cpp // // // Implementation for applying ENC deltas to a MiniMd. // //***************************************************************************** #include "stdafx.h" #include <limits.h> #include <posterror.h> #include <metamodelrw.h> #include <stgio.h> #include <stgtiggerstorage.h> #include "mdlog.h" #include "rwutil.h" ULONG CMiniMdRW::m_SuppressedDeltaColumns[TBL_COUNT] = {0}; //***************************************************************************** // Copy the data from one MiniMd to another. //***************************************************************************** __checkReturn HRESULT CMiniMdRW::ApplyRecordDelta( CMiniMdRW &mdDelta, // The delta MetaData. ULONG ixTbl, // The table with the data. void *pDelta, // The delta MetaData record. void *pRecord) // The record to update. { HRESULT hr = S_OK; ULONG mask = m_SuppressedDeltaColumns[ixTbl]; for (ULONG ixCol = 0; ixCol<m_TableDefs[ixTbl].m_cCols; ++ixCol, mask >>= 1) { // Skip certain pointer columns. if (mask & 0x01) continue; ULONG val = mdDelta.GetCol(ixTbl, ixCol, pDelta); IfFailRet(PutCol(ixTbl, ixCol, pRecord, val)); } return hr; } // CMiniMdRW::ApplyRecordDelta //***************************************************************************** // Apply a delta record to a table, generically. //***************************************************************************** __checkReturn HRESULT CMiniMdRW::ApplyTableDelta( CMiniMdRW &mdDelta, // Interface to MD with the ENC delta. ULONG ixTbl, // Table index to update. RID iRid, // RID of the changed item. int fc) // Function code of update. { HRESULT hr = S_OK; void *pRec; // Record in existing MetaData. void *pDeltaRec; // Record if Delta MetaData. RID newRid; // Rid of new record. // Get the delta record. IfFailGo(mdDelta.GetDeltaRecord(ixTbl, iRid, &pDeltaRec)); // Get the record from the base metadata. if (iRid > m_Schema.m_cRecs[ixTbl]) { // Added record. Each addition is the next one. _ASSERTE(iRid == m_Schema.m_cRecs[ixTbl] + 1); switch (ixTbl) { case TBL_TypeDef: IfFailGo(AddTypeDefRecord(reinterpret_cast<TypeDefRec **>(&pRec), &newRid)); break; case TBL_Method: IfFailGo(AddMethodRecord(reinterpret_cast<MethodRec **>(&pRec), &newRid)); break; case TBL_EventMap: IfFailGo(AddEventMapRecord(reinterpret_cast<EventMapRec **>(&pRec), &newRid)); break; case TBL_PropertyMap: IfFailGo(AddPropertyMapRecord(reinterpret_cast<PropertyMapRec **>(&pRec), &newRid)); break; default: IfFailGo(AddRecord(ixTbl, &pRec, &newRid)); break; } IfNullGo(pRec); _ASSERTE(iRid == newRid); } else { // Updated record. IfFailGo(getRow(ixTbl, iRid, &pRec)); } // Copy the record info. IfFailGo(ApplyRecordDelta(mdDelta, ixTbl, pDeltaRec, pRec)); ErrExit: return hr; } // CMiniMdRW::ApplyTableDelta //***************************************************************************** // Get the record from a Delta MetaData that corresponds to the actual record. //***************************************************************************** __checkReturn HRESULT CMiniMdRW::GetDeltaRecord( ULONG ixTbl, // Table. ULONG iRid, // Record in the table. void **ppRecord) { HRESULT hr; ULONG iMap; // RID in map table. ENCMapRec *pMap; // Row in map table. *ppRecord = NULL; // If no remap, just return record directly. 
if ((m_Schema.m_cRecs[TBL_ENCMap] == 0) || (ixTbl == TBL_Module) || !IsMinimalDelta()) { return getRow(ixTbl, iRid, ppRecord); } // Use the remap table to find the physical row containing this logical row. iMap = (*m_rENCRecs)[ixTbl]; IfFailRet(GetENCMapRecord(iMap, &pMap)); // Search for desired record. while ((TblFromRecId(pMap->GetToken()) == ixTbl) && (RidFromRecId(pMap->GetToken()) < iRid)) { IfFailRet(GetENCMapRecord(++iMap, &pMap)); } _ASSERTE((TblFromRecId(pMap->GetToken()) == ixTbl) && (RidFromRecId(pMap->GetToken()) == iRid)); // Relative position within table's group in map is physical rid. iRid = iMap - (*m_rENCRecs)[ixTbl] + 1; return getRow(ixTbl, iRid, ppRecord); } // CMiniMdRW::GetDeltaRecord //***************************************************************************** // Given a MetaData with ENC changes, apply those changes to this MetaData. //***************************************************************************** __checkReturn HRESULT CMiniMdRW::ApplyHeapDeltas( CMiniMdRW &mdDelta) // Interface to MD with the ENC delta. { if (mdDelta.IsMinimalDelta()) { return ApplyHeapDeltasWithMinimalDelta(mdDelta); } else { return ApplyHeapDeltasWithFullDelta(mdDelta); } }// CMiniMdRW::ApplyHeapDeltas __checkReturn HRESULT CMiniMdRW::ApplyHeapDeltasWithMinimalDelta( CMiniMdRW &mdDelta) // Interface to MD with the ENC delta. { HRESULT hr = S_OK; // Extend the heaps with EnC minimal delta IfFailGo(m_StringHeap.AddStringHeap( &(mdDelta.m_StringHeap), 0)); // Start offset in the mdDelta IfFailGo(m_BlobHeap.AddBlobHeap( &(mdDelta.m_BlobHeap), 0)); // Start offset in the mdDelta IfFailGo(m_UserStringHeap.AddBlobHeap( &(mdDelta.m_UserStringHeap), 0)); // Start offset in the mdDelta // We never do a minimal delta with the guid heap IfFailGo(m_GuidHeap.AddGuidHeap( &(mdDelta.m_GuidHeap), m_GuidHeap.GetSize())); // Starting offset in the full delta guid heap ErrExit: return hr; } // CMiniMdRW::ApplyHeapDeltasWithMinimalDelta __checkReturn HRESULT CMiniMdRW::ApplyHeapDeltasWithFullDelta( CMiniMdRW &mdDelta) // Interface to MD with the ENC delta. { HRESULT hr = S_OK; // Extend the heaps with EnC full delta IfFailRet(m_StringHeap.AddStringHeap( &(mdDelta.m_StringHeap), m_StringHeap.GetUnalignedSize())); // Starting offset in the full delta string heap IfFailRet(m_BlobHeap.AddBlobHeap( &(mdDelta.m_BlobHeap), m_BlobHeap.GetUnalignedSize())); // Starting offset in the full delta blob heap IfFailRet(m_UserStringHeap.AddBlobHeap( &(mdDelta.m_UserStringHeap), m_UserStringHeap.GetUnalignedSize())); // Starting offset in the full delta user string heap IfFailRet(m_GuidHeap.AddGuidHeap( &(mdDelta.m_GuidHeap), m_GuidHeap.GetSize())); // Starting offset in the full delta guid heap return hr; } // CMiniMdRW::ApplyHeapDeltasWithFullDelta //***************************************************************************** // Driver for the delta process. //***************************************************************************** __checkReturn HRESULT CMiniMdRW::ApplyDelta( CMiniMdRW &mdDelta) // Interface to MD with the ENC delta. { HRESULT hr = S_OK; ULONG iENC; // Loop control. RID iRid; // RID of some record. RID iNew; // RID of a new record. int i; // Loop control. ULONG ixTbl; // A table. #ifdef _DEBUG if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_MD_ApplyDeltaBreak)) { _ASSERTE(!"CMiniMDRW::ApplyDelta()"); } #endif // _DEBUG // Init the suppressed column table. We know this one isn't zero... 
if (m_SuppressedDeltaColumns[TBL_TypeDef] == 0) { m_SuppressedDeltaColumns[TBL_EventMap] = (1 << EventMapRec::COL_EventList); m_SuppressedDeltaColumns[TBL_PropertyMap] = (1 << PropertyMapRec::COL_PropertyList); m_SuppressedDeltaColumns[TBL_EventMap] = (1 << EventMapRec::COL_EventList); m_SuppressedDeltaColumns[TBL_Method] = (1 << MethodRec::COL_ParamList); m_SuppressedDeltaColumns[TBL_TypeDef] = (1 << TypeDefRec::COL_FieldList)|(1<<TypeDefRec::COL_MethodList); } // Verify the version of the MD. if (m_Schema.m_major != mdDelta.m_Schema.m_major || m_Schema.m_minor != mdDelta.m_Schema.m_minor) { _ASSERTE(!"Version of Delta MetaData is a incompatible with current MetaData."); //<TODO>@FUTURE: unique error in the future since we are not shipping ENC.</TODO> return E_INVALIDARG; } // verify MVIDs. ModuleRec *pModDelta; ModuleRec *pModBase; IfFailGo(mdDelta.GetModuleRecord(1, &pModDelta)); IfFailGo(GetModuleRecord(1, &pModBase)); GUID GuidDelta; GUID GuidBase; IfFailGo(mdDelta.getMvidOfModule(pModDelta, &GuidDelta)); IfFailGo(getMvidOfModule(pModBase, &GuidBase)); if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_MD_DeltaCheck) && (GuidDelta != GuidBase)) { _ASSERTE(!"Delta MetaData has different base than current MetaData."); return E_INVALIDARG; } // Let the other md prepare for sparse records. IfFailGo(mdDelta.StartENCMap()); // Fix the heaps. IfFailGo(ApplyHeapDeltas(mdDelta)); // Truncate some tables in preparation to copy in new ENCLog data. for (i = 0; (ixTbl = m_TruncatedEncTables[i]) != (ULONG)-1; ++i) { m_Tables[ixTbl].Delete(); IfFailGo(m_Tables[ixTbl].InitializeEmpty_WithRecordCount( m_TableDefs[ixTbl].m_cbRec, mdDelta.m_Schema.m_cRecs[ixTbl] COMMA_INDEBUG_MD(TRUE))); // fIsReadWrite INDEBUG_MD(m_Tables[ixTbl].Debug_SetTableInfo(NULL, ixTbl)); m_Schema.m_cRecs[ixTbl] = 0; } // For each record in the ENC log... for (iENC = 1; iENC <= mdDelta.m_Schema.m_cRecs[TBL_ENCLog]; ++iENC) { // Get the record, and the updated token. ENCLogRec *pENC; IfFailGo(mdDelta.GetENCLogRecord(iENC, &pENC)); ENCLogRec *pENC2; IfFailGo(AddENCLogRecord(&pENC2, &iNew)); IfNullGo(pENC2); ENCLogRec *pENC3; _ASSERTE(iNew == iENC); ULONG func = pENC->GetFuncCode(); pENC2->SetFuncCode(pENC->GetFuncCode()); pENC2->SetToken(pENC->GetToken()); // What kind of record is this? if (IsRecId(pENC->GetToken())) { // Non-token table iRid = RidFromRecId(pENC->GetToken()); ixTbl = TblFromRecId(pENC->GetToken()); } else { // Token table. iRid = RidFromToken(pENC->GetToken()); ixTbl = GetTableForToken(pENC->GetToken()); } RID rid_Ignore; // Switch based on the function code. switch (func) { case eDeltaMethodCreate: // Next ENC record will define the new Method. MethodRec *pMethodRecord; IfFailGo(AddMethodRecord(&pMethodRecord, &rid_Ignore)); IfFailGo(AddMethodToTypeDef(iRid, m_Schema.m_cRecs[TBL_Method])); break; case eDeltaParamCreate: // Next ENC record will define the new Param. This record is // tricky because params will be re-ordered based on their sequence, // but the sequence isn't set until the NEXT record is applied. // So, for ParamCreate only, apply the param record delta before // adding the parent-child linkage. ParamRec *pParamRecord; IfFailGo(AddParamRecord(&pParamRecord, &rid_Ignore)); // Should have recorded a Param delta after the Param add. 
_ASSERTE(iENC<mdDelta.m_Schema.m_cRecs[TBL_ENCLog]); IfFailGo(mdDelta.GetENCLogRecord(iENC+1, &pENC3)); _ASSERTE(pENC3->GetFuncCode() == 0); _ASSERTE(GetTableForToken(pENC3->GetToken()) == TBL_Param); IfFailGo(ApplyTableDelta(mdDelta, TBL_Param, RidFromToken(pENC3->GetToken()), eDeltaFuncDefault)); // Now that Param record is OK, set up linkage. IfFailGo(AddParamToMethod(iRid, m_Schema.m_cRecs[TBL_Param])); break; case eDeltaFieldCreate: // Next ENC record will define the new Field. FieldRec *pFieldRecord; IfFailGo(AddFieldRecord(&pFieldRecord, &rid_Ignore)); IfFailGo(AddFieldToTypeDef(iRid, m_Schema.m_cRecs[TBL_Field])); break; case eDeltaPropertyCreate: // Next ENC record will define the new Property. PropertyRec *pPropertyRecord; IfFailGo(AddPropertyRecord(&pPropertyRecord, &rid_Ignore)); IfFailGo(AddPropertyToPropertyMap(iRid, m_Schema.m_cRecs[TBL_Property])); break; case eDeltaEventCreate: // Next ENC record will define the new Event. EventRec *pEventRecord; IfFailGo(AddEventRecord(&pEventRecord, &rid_Ignore)); IfFailGo(AddEventToEventMap(iRid, m_Schema.m_cRecs[TBL_Event])); break; case eDeltaFuncDefault: IfFailGo(ApplyTableDelta(mdDelta, ixTbl, iRid, func)); break; default: _ASSERTE(!"Unexpected function in ApplyDelta"); IfFailGo(E_UNEXPECTED); break; } } m_Schema.m_cRecs[TBL_ENCLog] = mdDelta.m_Schema.m_cRecs[TBL_ENCLog]; ErrExit: // Store the result for returning (IfFailRet will modify hr) HRESULT hrReturn = hr; IfFailRet(mdDelta.EndENCMap()); return hrReturn; } // CMiniMdRW::ApplyDelta //***************************************************************************** //***************************************************************************** __checkReturn HRESULT CMiniMdRW::StartENCMap() // S_OK or error. { HRESULT hr = S_OK; ULONG iENC; // Loop control. ULONG ixTbl; // A table. int ixTblPrev = -1; // Table previously seen. _ASSERTE(m_rENCRecs == 0); if (m_Schema.m_cRecs[TBL_ENCMap] == 0) return S_OK; // Build an array of pointers into the ENCMap table for fast access to the ENCMap // for each table. m_rENCRecs = new (nothrow) ULONGARRAY; IfNullGo(m_rENCRecs); if (!m_rENCRecs->AllocateBlock(TBL_COUNT)) IfFailGo(E_OUTOFMEMORY); for (iENC = 1; iENC <= m_Schema.m_cRecs[TBL_ENCMap]; ++iENC) { ENCMapRec *pMap; IfFailGo(GetENCMapRecord(iENC, &pMap)); ixTbl = TblFromRecId(pMap->GetToken()); _ASSERTE((int)ixTbl >= ixTblPrev); _ASSERTE(ixTbl < TBL_COUNT); _ASSERTE(ixTbl != TBL_ENCMap); _ASSERTE(ixTbl != TBL_ENCLog); if ((int)ixTbl == ixTblPrev) continue; // Catch up on any skipped tables. while (ixTblPrev < (int)ixTbl) { (*m_rENCRecs)[++ixTblPrev] = iENC; } } while (ixTblPrev < TBL_COUNT-1) { (*m_rENCRecs)[++ixTblPrev] = iENC; } ErrExit: return hr; } // CMiniMdRW::StartENCMap //***************************************************************************** //***************************************************************************** __checkReturn HRESULT CMiniMdRW::EndENCMap() { if (m_rENCRecs != NULL) { delete m_rENCRecs; m_rENCRecs = NULL; } return S_OK; } // CMiniMdRW::EndENCMap
-1
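`ApplyRecordDelta` in the metadata code above copies a delta record column by column, shifting a per-table bitmask one position per column and skipping any column whose bit is set (the list-pointer columns that the base image owns). The following is a small, hypothetical sketch of that masking loop; `Record` and `CopyRecordWithMask` are invented names for illustration and not the metadata reader's API.

```cpp
// Hypothetical sketch of the suppressed-column copy loop used when applying EnC deltas.
#include <cstdint>
#include <cstddef>
#include <vector>

using Record = std::vector<std::uint32_t>;   // one value per column, for illustration

void CopyRecordWithMask(const Record& deltaRec, Record& baseRec, std::uint32_t suppressedColumns)
{
    std::uint32_t mask = suppressedColumns;
    for (std::size_t col = 0; col < deltaRec.size() && col < baseRec.size(); ++col, mask >>= 1)
    {
        if (mask & 0x1)
            continue;                        // column owned by the base image (e.g. a child-list pointer)
        baseRec[col] = deltaRec[col];        // otherwise take the value from the delta record
    }
}

// Example: suppress column 2, analogous to masking out a FieldList/MethodList column.
// CopyRecordWithMask(delta, base, 1u << 2);
```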
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/coreclr/jit/bitsetasshortlong.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // A set of integers in the range [0..N], for some N defined by the "Env" (via "BitSetTraits"). // // Represented as a pointer-sized item. If N bits can fit in this item, the representation is "direct"; otherwise, // the item is a pointer to an array of K size_t's, where K is the number of size_t's necessary to hold N bits. #ifndef bitSetAsShortLong_DEFINED #define bitSetAsShortLong_DEFINED 1 #include "bitset.h" #include "compilerbitsettraits.h" typedef size_t* BitSetShortLongRep; template <typename Env, typename BitSetTraits> class BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits> { public: typedef BitSetShortLongRep Rep; private: static const unsigned BitsInSizeT = sizeof(size_t) * BitSetSupport::BitsInByte; inline static bool IsShort(Env env) { return BitSetTraits::GetArrSize(env, sizeof(size_t)) <= 1; } // The operations on the "long" (pointer-to-array-of-size_t) versions of the representation. static void AssignLong(Env env, BitSetShortLongRep& lhs, BitSetShortLongRep rhs); static BitSetShortLongRep MakeSingletonLong(Env env, unsigned bitNum); static BitSetShortLongRep MakeCopyLong(Env env, BitSetShortLongRep bs); static bool IsEmptyLong(Env env, BitSetShortLongRep bs); static unsigned CountLong(Env env, BitSetShortLongRep bs); static bool IsEmptyUnionLong(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2); static void UnionDLong(Env env, BitSetShortLongRep& bs1, BitSetShortLongRep bs2); static void DiffDLong(Env env, BitSetShortLongRep& bs1, BitSetShortLongRep bs2); static void AddElemDLong(Env env, BitSetShortLongRep& bs, unsigned i); static bool TryAddElemDLong(Env env, BitSetShortLongRep& bs, unsigned i); static void RemoveElemDLong(Env env, BitSetShortLongRep& bs, unsigned i); static void ClearDLong(Env env, BitSetShortLongRep& bs); static BitSetShortLongRep MakeUninitArrayBits(Env env); static BitSetShortLongRep MakeEmptyArrayBits(Env env); static BitSetShortLongRep MakeFullArrayBits(Env env); static bool IsMemberLong(Env env, BitSetShortLongRep bs, unsigned i); static bool EqualLong(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2); static bool IsSubsetLong(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2); static bool IsEmptyIntersectionLong(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2); static void IntersectionDLong(Env env, BitSetShortLongRep& bs1, BitSetShortLongRep bs2); static void DataFlowDLong(Env env, BitSetShortLongRep& out, const BitSetShortLongRep gen, const BitSetShortLongRep in); static void LivenessDLong(Env env, BitSetShortLongRep& in, const BitSetShortLongRep def, const BitSetShortLongRep use, const BitSetShortLongRep out); #ifdef DEBUG static const char* ToStringLong(Env env, BitSetShortLongRep bs); #endif public: inline static BitSetShortLongRep UninitVal() { return nullptr; } static bool MayBeUninit(BitSetShortLongRep bs) { return bs == UninitVal(); } static void Assign(Env env, BitSetShortLongRep& lhs, BitSetShortLongRep rhs) { // We can't assert that rhs != UninitVal in the Short case, because in that // case it's a legal value. if (IsShort(env)) { // Both are short. 
lhs = rhs; } else if (lhs == UninitVal()) { assert(rhs != UninitVal()); lhs = MakeCopy(env, rhs); } else { AssignLong(env, lhs, rhs); } } static void AssignAllowUninitRhs(Env env, BitSetShortLongRep& lhs, BitSetShortLongRep rhs) { if (IsShort(env)) { // Both are short. lhs = rhs; } else if (rhs == UninitVal()) { lhs = rhs; } else if (lhs == UninitVal()) { lhs = MakeCopy(env, rhs); } else { AssignLong(env, lhs, rhs); } } static void AssignNoCopy(Env env, BitSetShortLongRep& lhs, BitSetShortLongRep rhs) { lhs = rhs; } static void ClearD(Env env, BitSetShortLongRep& bs) { if (IsShort(env)) { bs = (BitSetShortLongRep) nullptr; } else { assert(bs != UninitVal()); ClearDLong(env, bs); } } static BitSetShortLongRep MakeSingleton(Env env, unsigned bitNum) { assert(bitNum < BitSetTraits::GetSize(env)); if (IsShort(env)) { return BitSetShortLongRep(((size_t)1) << bitNum); } else { return MakeSingletonLong(env, bitNum); } } static BitSetShortLongRep MakeCopy(Env env, BitSetShortLongRep bs) { if (IsShort(env)) { return bs; } else { return MakeCopyLong(env, bs); } } static bool IsEmpty(Env env, BitSetShortLongRep bs) { if (IsShort(env)) { return bs == nullptr; } else { assert(bs != UninitVal()); return IsEmptyLong(env, bs); } } static unsigned Count(Env env, BitSetShortLongRep bs) { if (IsShort(env)) { return BitSetSupport::CountBitsInIntegral(size_t(bs)); } else { assert(bs != UninitVal()); return CountLong(env, bs); } } static bool IsEmptyUnion(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2) { if (IsShort(env)) { return (((size_t)bs1) | ((size_t)bs2)) == 0; } else { return IsEmptyUnionLong(env, bs1, bs2); } } static void UnionD(Env env, BitSetShortLongRep& bs1, BitSetShortLongRep bs2) { if (IsShort(env)) { bs1 = (BitSetShortLongRep)(((size_t)bs1) | ((size_t)bs2)); } else { UnionDLong(env, bs1, bs2); } } static BitSetShortLongRep Union(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2) { BitSetShortLongRep res = MakeCopy(env, bs1); UnionD(env, res, bs2); return res; } static void DiffD(Env env, BitSetShortLongRep& bs1, BitSetShortLongRep bs2) { if (IsShort(env)) { bs1 = (BitSetShortLongRep)(((size_t)bs1) & (~(size_t)bs2)); } else { DiffDLong(env, bs1, bs2); } } static BitSetShortLongRep Diff(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2) { BitSetShortLongRep res = MakeCopy(env, bs1); DiffD(env, res, bs2); return res; } static void RemoveElemD(Env env, BitSetShortLongRep& bs, unsigned i) { assert(i < BitSetTraits::GetSize(env)); if (IsShort(env)) { size_t mask = ((size_t)1) << i; mask = ~mask; bs = (BitSetShortLongRep)(((size_t)bs) & mask); } else { assert(bs != UninitVal()); RemoveElemDLong(env, bs, i); } } static BitSetShortLongRep RemoveElem(Env env, BitSetShortLongRep bs, unsigned i) { BitSetShortLongRep res = MakeCopy(env, bs); RemoveElemD(env, res, i); return res; } static void AddElemD(Env env, BitSetShortLongRep& bs, unsigned i) { assert(i < BitSetTraits::GetSize(env)); if (IsShort(env)) { size_t mask = ((size_t)1) << i; bs = (BitSetShortLongRep)(((size_t)bs) | mask); } else { AddElemDLong(env, bs, i); } } static BitSetShortLongRep AddElem(Env env, BitSetShortLongRep bs, unsigned i) { BitSetShortLongRep res = MakeCopy(env, bs); AddElemD(env, res, i); return res; } static bool TryAddElemD(Env env, BitSetShortLongRep& bs, unsigned i) { assert(i < BitSetTraits::GetSize(env)); if (IsShort(env)) { size_t mask = ((size_t)1) << i; size_t bits = (size_t)bs; bool added = (bits & mask) == 0; bs = (BitSetShortLongRep)(bits | mask); return added; } else { return 
TryAddElemDLong(env, bs, i); } } static bool IsMember(Env env, const BitSetShortLongRep bs, unsigned i) { assert(i < BitSetTraits::GetSize(env)); if (IsShort(env)) { size_t mask = ((size_t)1) << i; return (((size_t)bs) & mask) != 0; } else { assert(bs != UninitVal()); return IsMemberLong(env, bs, i); } } static void IntersectionD(Env env, BitSetShortLongRep& bs1, BitSetShortLongRep bs2) { if (IsShort(env)) { size_t val = (size_t)bs1; val &= (size_t)bs2; bs1 = (BitSetShortLongRep)val; } else { IntersectionDLong(env, bs1, bs2); } } static BitSetShortLongRep Intersection(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2) { BitSetShortLongRep res = MakeCopy(env, bs1); IntersectionD(env, res, bs2); return res; } static bool IsEmptyIntersection(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2) { if (IsShort(env)) { return (((size_t)bs1) & ((size_t)bs2)) == 0; } else { return IsEmptyIntersectionLong(env, bs1, bs2); } } static void DataFlowD(Env env, BitSetShortLongRep& out, const BitSetShortLongRep gen, const BitSetShortLongRep in) { if (IsShort(env)) { out = (BitSetShortLongRep)((size_t)out & ((size_t)gen | (size_t)in)); } else { DataFlowDLong(env, out, gen, in); } } static void LivenessD(Env env, BitSetShortLongRep& in, const BitSetShortLongRep def, const BitSetShortLongRep use, const BitSetShortLongRep out) { if (IsShort(env)) { in = (BitSetShortLongRep)((size_t)use | ((size_t)out & ~(size_t)def)); } else { LivenessDLong(env, in, def, use, out); } } static bool IsSubset(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2) { if (IsShort(env)) { size_t u1 = (size_t)bs1; size_t u2 = (size_t)bs2; return (u1 & u2) == u1; } else { return IsSubsetLong(env, bs1, bs2); } } static bool Equal(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2) { if (IsShort(env)) { return (size_t)bs1 == (size_t)bs2; } else { return EqualLong(env, bs1, bs2); } } #ifdef DEBUG // Returns a string valid until the allocator releases the memory. static const char* ToString(Env env, BitSetShortLongRep bs) { if (IsShort(env)) { assert(sizeof(BitSetShortLongRep) == sizeof(size_t)); const int CharsForSizeT = sizeof(size_t) * 2; char* res = nullptr; const int ShortAllocSize = CharsForSizeT + 4; res = (char*)BitSetTraits::DebugAlloc(env, ShortAllocSize); size_t bits = (size_t)bs; unsigned remaining = ShortAllocSize; char* ptr = res; if (sizeof(size_t) == sizeof(int64_t)) { sprintf_s(ptr, remaining, "%016zX", bits); } else { assert(sizeof(size_t) == sizeof(int)); sprintf_s(ptr, remaining, "%08X", (DWORD)bits); } return res; } else { return ToStringLong(env, bs); } } #endif static BitSetShortLongRep MakeEmpty(Env env) { if (IsShort(env)) { return nullptr; } else { return MakeEmptyArrayBits(env); } } static BitSetShortLongRep MakeFull(Env env) { if (IsShort(env)) { // Can't just shift by numBits+1, since that might be 32 (and (1 << 32( == 1, for an unsigned). unsigned numBits = BitSetTraits::GetSize(env); if (numBits == BitsInSizeT) { // Can't use the implementation below to get all 1's... return BitSetShortLongRep(size_t(-1)); } else { return BitSetShortLongRep((size_t(1) << numBits) - 1); } } else { return MakeFullArrayBits(env); } } class Iter { // The BitSet that we're iterating over. This is updated to point at the current // size_t set of bits. BitSetShortLongRep m_bs; // The end of the iteration. BitSetShortLongRep m_bsEnd; // The remaining bits to be iterated over in the current size_t set of bits. // In the "short" case, these are all the remaining bits. 
// In the "long" case, these are remaining bits in the current element; // these and the bits in the remaining elements comprise the remaining bits. size_t m_bits; // The number of bits that have already been iterated over (set or clear). If you // add this to the bit number of the next bit in "m_bits", you get the proper bit number of that // bit in "m_bs". This is only updated when we increment m_bs. unsigned m_bitNum; public: Iter(Env env, const BitSetShortLongRep& bs) : m_bs(bs), m_bitNum(0) { if (BitSetOps::IsShort(env)) { m_bits = (size_t)bs; // Set the iteration end condition, valid even though this is not a pointer in the short case. m_bsEnd = bs + 1; } else { assert(bs != BitSetOps::UninitVal()); m_bits = bs[0]; unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); m_bsEnd = bs + len; } } bool NextElem(unsigned* pElem) { #if BITSET_TRACK_OPCOUNTS BitSetStaticsImpl::RecordOp(BitSetStaticsImpl::BSOP_NextBit); #endif for (;;) { DWORD nextBit; bool hasBit; #ifdef HOST_64BIT static_assert_no_msg(sizeof(size_t) == 8); hasBit = BitScanForward64(&nextBit, m_bits); #else static_assert_no_msg(sizeof(size_t) == 4); hasBit = BitScanForward(&nextBit, m_bits); #endif // If there's a bit, doesn't matter if we're short or long. if (hasBit) { *pElem = m_bitNum + nextBit; m_bits &= ~(((size_t)1) << nextBit); // clear bit we just found so we don't find it again return true; } else { // Go to the next size_t bit element. For short bitsets, this will hit the end condition // and exit. ++m_bs; if (m_bs == m_bsEnd) { return false; } // If we get here, it's not a short type, so get the next size_t element. m_bitNum += sizeof(size_t) * BitSetSupport::BitsInByte; m_bits = *m_bs; } } } }; typedef const BitSetShortLongRep& ValArgType; typedef BitSetShortLongRep RetValType; }; template <typename Env, typename BitSetTraits> void BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::AssignLong(Env env, BitSetShortLongRep& lhs, BitSetShortLongRep rhs) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); for (unsigned i = 0; i < len; i++) { lhs[i] = rhs[i]; } } template <typename Env, typename BitSetTraits> BitSetShortLongRep BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::MakeSingletonLong(Env env, unsigned bitNum) { assert(!IsShort(env)); BitSetShortLongRep res = MakeEmptyArrayBits(env); unsigned index = bitNum / BitsInSizeT; res[index] = ((size_t)1) << (bitNum % BitsInSizeT); return res; } template <typename Env, typename BitSetTraits> BitSetShortLongRep BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::MakeCopyLong(Env env, BitSetShortLongRep bs) { assert(!IsShort(env)); BitSetShortLongRep res = MakeUninitArrayBits(env); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); for (unsigned i = 0; i < len; i++) { res[i] = bs[i]; } return res; } template <typename Env, typename BitSetTraits> bool BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::IsEmptyLong(Env env, BitSetShortLongRep bs) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); for (unsigned i = 0; i < len; i++) { if (bs[i] != 0) { return false; } } return true; } template <typename Env, typename BitSetTraits> unsigned BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ 
BitSetTraits>::CountLong(Env env, BitSetShortLongRep bs) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); unsigned res = 0; for (unsigned i = 0; i < len; i++) { res += BitSetSupport::CountBitsInIntegral(bs[i]); } return res; } template <typename Env, typename BitSetTraits> void BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::UnionDLong(Env env, BitSetShortLongRep& bs1, BitSetShortLongRep bs2) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); for (unsigned i = 0; i < len; i++) { bs1[i] |= bs2[i]; } } template <typename Env, typename BitSetTraits> void BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::DiffDLong(Env env, BitSetShortLongRep& bs1, BitSetShortLongRep bs2) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); for (unsigned i = 0; i < len; i++) { bs1[i] &= ~bs2[i]; } } template <typename Env, typename BitSetTraits> void BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::AddElemDLong(Env env, BitSetShortLongRep& bs, unsigned i) { assert(!IsShort(env)); unsigned index = i / BitsInSizeT; size_t mask = ((size_t)1) << (i % BitsInSizeT); bs[index] |= mask; } template <typename Env, typename BitSetTraits> bool BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::TryAddElemDLong(Env env, BitSetShortLongRep& bs, unsigned i) { assert(!IsShort(env)); unsigned index = i / BitsInSizeT; size_t mask = ((size_t)1) << (i % BitsInSizeT); size_t bits = bs[index]; bool added = (bits & mask) == 0; bs[index] = bits | mask; return added; } template <typename Env, typename BitSetTraits> void BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::RemoveElemDLong(Env env, BitSetShortLongRep& bs, unsigned i) { assert(!IsShort(env)); unsigned index = i / BitsInSizeT; size_t mask = ((size_t)1) << (i % BitsInSizeT); mask = ~mask; bs[index] &= mask; } template <typename Env, typename BitSetTraits> void BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::ClearDLong(Env env, BitSetShortLongRep& bs) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); for (unsigned i = 0; i < len; i++) { bs[i] = 0; } } template <typename Env, typename BitSetTraits> BitSetShortLongRep BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::MakeUninitArrayBits(Env env) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); assert(len > 1); // Or else would not require an array. return (BitSetShortLongRep)(BitSetTraits::Alloc(env, len * sizeof(size_t))); } template <typename Env, typename BitSetTraits> BitSetShortLongRep BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::MakeEmptyArrayBits(Env env) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); assert(len > 1); // Or else would not require an array. 
BitSetShortLongRep res = (BitSetShortLongRep)(BitSetTraits::Alloc(env, len * sizeof(size_t))); for (unsigned i = 0; i < len; i++) { res[i] = 0; } return res; } template <typename Env, typename BitSetTraits> BitSetShortLongRep BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::MakeFullArrayBits(Env env) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); assert(len > 1); // Or else would not require an array. BitSetShortLongRep res = (BitSetShortLongRep)(BitSetTraits::Alloc(env, len * sizeof(size_t))); for (unsigned i = 0; i < len - 1; i++) { res[i] = size_t(-1); } // Start with all ones, shift in zeros in the last elem. unsigned lastElemBits = (BitSetTraits::GetSize(env) - 1) % BitsInSizeT + 1; res[len - 1] = (size_t(-1) >> (BitsInSizeT - lastElemBits)); return res; } template <typename Env, typename BitSetTraits> bool BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::IsMemberLong(Env env, BitSetShortLongRep bs, unsigned i) { assert(!IsShort(env)); unsigned index = i / BitsInSizeT; unsigned bitInElem = (i % BitsInSizeT); size_t mask = ((size_t)1) << bitInElem; return (bs[index] & mask) != 0; } template <typename Env, typename BitSetTraits> void BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::IntersectionDLong(Env env, BitSetShortLongRep& bs1, BitSetShortLongRep bs2) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); for (unsigned i = 0; i < len; i++) { bs1[i] &= bs2[i]; } } template <typename Env, typename BitSetTraits> bool BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::IsEmptyIntersectionLong(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); for (unsigned i = 0; i < len; i++) { if ((bs1[i] & bs2[i]) != 0) { return false; } } return true; } template <typename Env, typename BitSetTraits> bool BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::IsEmptyUnionLong(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); for (unsigned i = 0; i < len; i++) { if ((bs1[i] | bs2[i]) != 0) { return false; } } return true; } template <typename Env, typename BitSetTraits> void BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::DataFlowDLong(Env env, BitSetShortLongRep& out, const BitSetShortLongRep gen, const BitSetShortLongRep in) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); for (unsigned i = 0; i < len; i++) { out[i] = out[i] & (gen[i] | in[i]); } } template <typename Env, typename BitSetTraits> void BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::LivenessDLong(Env env, BitSetShortLongRep& in, const BitSetShortLongRep def, const BitSetShortLongRep use, const BitSetShortLongRep out) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); for (unsigned i = 0; i < len; i++) { in[i] = use[i] | (out[i] & ~def[i]); } } template <typename Env, typename BitSetTraits> bool BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, 
/*BitSetTraits*/ BitSetTraits>::EqualLong(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); for (unsigned i = 0; i < len; i++) { if (bs1[i] != bs2[i]) { return false; } } return true; } template <typename Env, typename BitSetTraits> bool BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::IsSubsetLong(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); for (unsigned i = 0; i < len; i++) { if ((bs1[i] & bs2[i]) != bs1[i]) { return false; } } return true; } #ifdef DEBUG template <typename Env, typename BitSetTraits> const char* BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::ToStringLong(Env env, BitSetShortLongRep bs) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); const int CharsForSizeT = sizeof(size_t) * 2; unsigned allocSz = len * CharsForSizeT + 4; unsigned remaining = allocSz; char* res = (char*)BitSetTraits::DebugAlloc(env, allocSz); char* temp = res; for (unsigned i = len; 0 < i; i--) { size_t bits = bs[i - 1]; if (sizeof(size_t) == sizeof(int64_t)) { sprintf_s(temp, remaining, "%016zX", bits); temp += 16; remaining -= 16; } else { assert(sizeof(size_t) == sizeof(unsigned)); sprintf_s(temp, remaining, "%08X", (unsigned)bits); temp += 8; remaining -= 8; } } return res; } #endif #endif // bitSetAsShortLong_DEFINED
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // A set of integers in the range [0..N], for some N defined by the "Env" (via "BitSetTraits"). // // Represented as a pointer-sized item. If N bits can fit in this item, the representation is "direct"; otherwise, // the item is a pointer to an array of K size_t's, where K is the number of size_t's necessary to hold N bits. #ifndef bitSetAsShortLong_DEFINED #define bitSetAsShortLong_DEFINED 1 #include "bitset.h" #include "compilerbitsettraits.h" typedef size_t* BitSetShortLongRep; template <typename Env, typename BitSetTraits> class BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits> { public: typedef BitSetShortLongRep Rep; private: static const unsigned BitsInSizeT = sizeof(size_t) * BitSetSupport::BitsInByte; inline static bool IsShort(Env env) { return BitSetTraits::GetArrSize(env, sizeof(size_t)) <= 1; } // The operations on the "long" (pointer-to-array-of-size_t) versions of the representation. static void AssignLong(Env env, BitSetShortLongRep& lhs, BitSetShortLongRep rhs); static BitSetShortLongRep MakeSingletonLong(Env env, unsigned bitNum); static BitSetShortLongRep MakeCopyLong(Env env, BitSetShortLongRep bs); static bool IsEmptyLong(Env env, BitSetShortLongRep bs); static unsigned CountLong(Env env, BitSetShortLongRep bs); static bool IsEmptyUnionLong(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2); static void UnionDLong(Env env, BitSetShortLongRep& bs1, BitSetShortLongRep bs2); static void DiffDLong(Env env, BitSetShortLongRep& bs1, BitSetShortLongRep bs2); static void AddElemDLong(Env env, BitSetShortLongRep& bs, unsigned i); static bool TryAddElemDLong(Env env, BitSetShortLongRep& bs, unsigned i); static void RemoveElemDLong(Env env, BitSetShortLongRep& bs, unsigned i); static void ClearDLong(Env env, BitSetShortLongRep& bs); static BitSetShortLongRep MakeUninitArrayBits(Env env); static BitSetShortLongRep MakeEmptyArrayBits(Env env); static BitSetShortLongRep MakeFullArrayBits(Env env); static bool IsMemberLong(Env env, BitSetShortLongRep bs, unsigned i); static bool EqualLong(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2); static bool IsSubsetLong(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2); static bool IsEmptyIntersectionLong(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2); static void IntersectionDLong(Env env, BitSetShortLongRep& bs1, BitSetShortLongRep bs2); static void DataFlowDLong(Env env, BitSetShortLongRep& out, const BitSetShortLongRep gen, const BitSetShortLongRep in); static void LivenessDLong(Env env, BitSetShortLongRep& in, const BitSetShortLongRep def, const BitSetShortLongRep use, const BitSetShortLongRep out); #ifdef DEBUG static const char* ToStringLong(Env env, BitSetShortLongRep bs); #endif public: inline static BitSetShortLongRep UninitVal() { return nullptr; } static bool MayBeUninit(BitSetShortLongRep bs) { return bs == UninitVal(); } static void Assign(Env env, BitSetShortLongRep& lhs, BitSetShortLongRep rhs) { // We can't assert that rhs != UninitVal in the Short case, because in that // case it's a legal value. if (IsShort(env)) { // Both are short. 
lhs = rhs; } else if (lhs == UninitVal()) { assert(rhs != UninitVal()); lhs = MakeCopy(env, rhs); } else { AssignLong(env, lhs, rhs); } } static void AssignAllowUninitRhs(Env env, BitSetShortLongRep& lhs, BitSetShortLongRep rhs) { if (IsShort(env)) { // Both are short. lhs = rhs; } else if (rhs == UninitVal()) { lhs = rhs; } else if (lhs == UninitVal()) { lhs = MakeCopy(env, rhs); } else { AssignLong(env, lhs, rhs); } } static void AssignNoCopy(Env env, BitSetShortLongRep& lhs, BitSetShortLongRep rhs) { lhs = rhs; } static void ClearD(Env env, BitSetShortLongRep& bs) { if (IsShort(env)) { bs = (BitSetShortLongRep) nullptr; } else { assert(bs != UninitVal()); ClearDLong(env, bs); } } static BitSetShortLongRep MakeSingleton(Env env, unsigned bitNum) { assert(bitNum < BitSetTraits::GetSize(env)); if (IsShort(env)) { return BitSetShortLongRep(((size_t)1) << bitNum); } else { return MakeSingletonLong(env, bitNum); } } static BitSetShortLongRep MakeCopy(Env env, BitSetShortLongRep bs) { if (IsShort(env)) { return bs; } else { return MakeCopyLong(env, bs); } } static bool IsEmpty(Env env, BitSetShortLongRep bs) { if (IsShort(env)) { return bs == nullptr; } else { assert(bs != UninitVal()); return IsEmptyLong(env, bs); } } static unsigned Count(Env env, BitSetShortLongRep bs) { if (IsShort(env)) { return BitSetSupport::CountBitsInIntegral(size_t(bs)); } else { assert(bs != UninitVal()); return CountLong(env, bs); } } static bool IsEmptyUnion(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2) { if (IsShort(env)) { return (((size_t)bs1) | ((size_t)bs2)) == 0; } else { return IsEmptyUnionLong(env, bs1, bs2); } } static void UnionD(Env env, BitSetShortLongRep& bs1, BitSetShortLongRep bs2) { if (IsShort(env)) { bs1 = (BitSetShortLongRep)(((size_t)bs1) | ((size_t)bs2)); } else { UnionDLong(env, bs1, bs2); } } static BitSetShortLongRep Union(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2) { BitSetShortLongRep res = MakeCopy(env, bs1); UnionD(env, res, bs2); return res; } static void DiffD(Env env, BitSetShortLongRep& bs1, BitSetShortLongRep bs2) { if (IsShort(env)) { bs1 = (BitSetShortLongRep)(((size_t)bs1) & (~(size_t)bs2)); } else { DiffDLong(env, bs1, bs2); } } static BitSetShortLongRep Diff(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2) { BitSetShortLongRep res = MakeCopy(env, bs1); DiffD(env, res, bs2); return res; } static void RemoveElemD(Env env, BitSetShortLongRep& bs, unsigned i) { assert(i < BitSetTraits::GetSize(env)); if (IsShort(env)) { size_t mask = ((size_t)1) << i; mask = ~mask; bs = (BitSetShortLongRep)(((size_t)bs) & mask); } else { assert(bs != UninitVal()); RemoveElemDLong(env, bs, i); } } static BitSetShortLongRep RemoveElem(Env env, BitSetShortLongRep bs, unsigned i) { BitSetShortLongRep res = MakeCopy(env, bs); RemoveElemD(env, res, i); return res; } static void AddElemD(Env env, BitSetShortLongRep& bs, unsigned i) { assert(i < BitSetTraits::GetSize(env)); if (IsShort(env)) { size_t mask = ((size_t)1) << i; bs = (BitSetShortLongRep)(((size_t)bs) | mask); } else { AddElemDLong(env, bs, i); } } static BitSetShortLongRep AddElem(Env env, BitSetShortLongRep bs, unsigned i) { BitSetShortLongRep res = MakeCopy(env, bs); AddElemD(env, res, i); return res; } static bool TryAddElemD(Env env, BitSetShortLongRep& bs, unsigned i) { assert(i < BitSetTraits::GetSize(env)); if (IsShort(env)) { size_t mask = ((size_t)1) << i; size_t bits = (size_t)bs; bool added = (bits & mask) == 0; bs = (BitSetShortLongRep)(bits | mask); return added; } else { return 
TryAddElemDLong(env, bs, i); } } static bool IsMember(Env env, const BitSetShortLongRep bs, unsigned i) { assert(i < BitSetTraits::GetSize(env)); if (IsShort(env)) { size_t mask = ((size_t)1) << i; return (((size_t)bs) & mask) != 0; } else { assert(bs != UninitVal()); return IsMemberLong(env, bs, i); } } static void IntersectionD(Env env, BitSetShortLongRep& bs1, BitSetShortLongRep bs2) { if (IsShort(env)) { size_t val = (size_t)bs1; val &= (size_t)bs2; bs1 = (BitSetShortLongRep)val; } else { IntersectionDLong(env, bs1, bs2); } } static BitSetShortLongRep Intersection(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2) { BitSetShortLongRep res = MakeCopy(env, bs1); IntersectionD(env, res, bs2); return res; } static bool IsEmptyIntersection(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2) { if (IsShort(env)) { return (((size_t)bs1) & ((size_t)bs2)) == 0; } else { return IsEmptyIntersectionLong(env, bs1, bs2); } } static void DataFlowD(Env env, BitSetShortLongRep& out, const BitSetShortLongRep gen, const BitSetShortLongRep in) { if (IsShort(env)) { out = (BitSetShortLongRep)((size_t)out & ((size_t)gen | (size_t)in)); } else { DataFlowDLong(env, out, gen, in); } } static void LivenessD(Env env, BitSetShortLongRep& in, const BitSetShortLongRep def, const BitSetShortLongRep use, const BitSetShortLongRep out) { if (IsShort(env)) { in = (BitSetShortLongRep)((size_t)use | ((size_t)out & ~(size_t)def)); } else { LivenessDLong(env, in, def, use, out); } } static bool IsSubset(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2) { if (IsShort(env)) { size_t u1 = (size_t)bs1; size_t u2 = (size_t)bs2; return (u1 & u2) == u1; } else { return IsSubsetLong(env, bs1, bs2); } } static bool Equal(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2) { if (IsShort(env)) { return (size_t)bs1 == (size_t)bs2; } else { return EqualLong(env, bs1, bs2); } } #ifdef DEBUG // Returns a string valid until the allocator releases the memory. static const char* ToString(Env env, BitSetShortLongRep bs) { if (IsShort(env)) { assert(sizeof(BitSetShortLongRep) == sizeof(size_t)); const int CharsForSizeT = sizeof(size_t) * 2; char* res = nullptr; const int ShortAllocSize = CharsForSizeT + 4; res = (char*)BitSetTraits::DebugAlloc(env, ShortAllocSize); size_t bits = (size_t)bs; unsigned remaining = ShortAllocSize; char* ptr = res; if (sizeof(size_t) == sizeof(int64_t)) { sprintf_s(ptr, remaining, "%016zX", bits); } else { assert(sizeof(size_t) == sizeof(int)); sprintf_s(ptr, remaining, "%08X", (DWORD)bits); } return res; } else { return ToStringLong(env, bs); } } #endif static BitSetShortLongRep MakeEmpty(Env env) { if (IsShort(env)) { return nullptr; } else { return MakeEmptyArrayBits(env); } } static BitSetShortLongRep MakeFull(Env env) { if (IsShort(env)) { // Can't just shift by numBits+1, since that might be 32 (and (1 << 32( == 1, for an unsigned). unsigned numBits = BitSetTraits::GetSize(env); if (numBits == BitsInSizeT) { // Can't use the implementation below to get all 1's... return BitSetShortLongRep(size_t(-1)); } else { return BitSetShortLongRep((size_t(1) << numBits) - 1); } } else { return MakeFullArrayBits(env); } } class Iter { // The BitSet that we're iterating over. This is updated to point at the current // size_t set of bits. BitSetShortLongRep m_bs; // The end of the iteration. BitSetShortLongRep m_bsEnd; // The remaining bits to be iterated over in the current size_t set of bits. // In the "short" case, these are all the remaining bits. 
// In the "long" case, these are remaining bits in the current element; // these and the bits in the remaining elements comprise the remaining bits. size_t m_bits; // The number of bits that have already been iterated over (set or clear). If you // add this to the bit number of the next bit in "m_bits", you get the proper bit number of that // bit in "m_bs". This is only updated when we increment m_bs. unsigned m_bitNum; public: Iter(Env env, const BitSetShortLongRep& bs) : m_bs(bs), m_bitNum(0) { if (BitSetOps::IsShort(env)) { m_bits = (size_t)bs; // Set the iteration end condition, valid even though this is not a pointer in the short case. m_bsEnd = bs + 1; } else { assert(bs != BitSetOps::UninitVal()); m_bits = bs[0]; unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); m_bsEnd = bs + len; } } bool NextElem(unsigned* pElem) { #if BITSET_TRACK_OPCOUNTS BitSetStaticsImpl::RecordOp(BitSetStaticsImpl::BSOP_NextBit); #endif for (;;) { DWORD nextBit; bool hasBit; #ifdef HOST_64BIT static_assert_no_msg(sizeof(size_t) == 8); hasBit = BitScanForward64(&nextBit, m_bits); #else static_assert_no_msg(sizeof(size_t) == 4); hasBit = BitScanForward(&nextBit, m_bits); #endif // If there's a bit, doesn't matter if we're short or long. if (hasBit) { *pElem = m_bitNum + nextBit; m_bits &= ~(((size_t)1) << nextBit); // clear bit we just found so we don't find it again return true; } else { // Go to the next size_t bit element. For short bitsets, this will hit the end condition // and exit. ++m_bs; if (m_bs == m_bsEnd) { return false; } // If we get here, it's not a short type, so get the next size_t element. m_bitNum += sizeof(size_t) * BitSetSupport::BitsInByte; m_bits = *m_bs; } } } }; typedef const BitSetShortLongRep& ValArgType; typedef BitSetShortLongRep RetValType; }; template <typename Env, typename BitSetTraits> void BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::AssignLong(Env env, BitSetShortLongRep& lhs, BitSetShortLongRep rhs) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); for (unsigned i = 0; i < len; i++) { lhs[i] = rhs[i]; } } template <typename Env, typename BitSetTraits> BitSetShortLongRep BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::MakeSingletonLong(Env env, unsigned bitNum) { assert(!IsShort(env)); BitSetShortLongRep res = MakeEmptyArrayBits(env); unsigned index = bitNum / BitsInSizeT; res[index] = ((size_t)1) << (bitNum % BitsInSizeT); return res; } template <typename Env, typename BitSetTraits> BitSetShortLongRep BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::MakeCopyLong(Env env, BitSetShortLongRep bs) { assert(!IsShort(env)); BitSetShortLongRep res = MakeUninitArrayBits(env); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); for (unsigned i = 0; i < len; i++) { res[i] = bs[i]; } return res; } template <typename Env, typename BitSetTraits> bool BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::IsEmptyLong(Env env, BitSetShortLongRep bs) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); for (unsigned i = 0; i < len; i++) { if (bs[i] != 0) { return false; } } return true; } template <typename Env, typename BitSetTraits> unsigned BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ 
BitSetTraits>::CountLong(Env env, BitSetShortLongRep bs) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); unsigned res = 0; for (unsigned i = 0; i < len; i++) { res += BitSetSupport::CountBitsInIntegral(bs[i]); } return res; } template <typename Env, typename BitSetTraits> void BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::UnionDLong(Env env, BitSetShortLongRep& bs1, BitSetShortLongRep bs2) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); for (unsigned i = 0; i < len; i++) { bs1[i] |= bs2[i]; } } template <typename Env, typename BitSetTraits> void BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::DiffDLong(Env env, BitSetShortLongRep& bs1, BitSetShortLongRep bs2) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); for (unsigned i = 0; i < len; i++) { bs1[i] &= ~bs2[i]; } } template <typename Env, typename BitSetTraits> void BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::AddElemDLong(Env env, BitSetShortLongRep& bs, unsigned i) { assert(!IsShort(env)); unsigned index = i / BitsInSizeT; size_t mask = ((size_t)1) << (i % BitsInSizeT); bs[index] |= mask; } template <typename Env, typename BitSetTraits> bool BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::TryAddElemDLong(Env env, BitSetShortLongRep& bs, unsigned i) { assert(!IsShort(env)); unsigned index = i / BitsInSizeT; size_t mask = ((size_t)1) << (i % BitsInSizeT); size_t bits = bs[index]; bool added = (bits & mask) == 0; bs[index] = bits | mask; return added; } template <typename Env, typename BitSetTraits> void BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::RemoveElemDLong(Env env, BitSetShortLongRep& bs, unsigned i) { assert(!IsShort(env)); unsigned index = i / BitsInSizeT; size_t mask = ((size_t)1) << (i % BitsInSizeT); mask = ~mask; bs[index] &= mask; } template <typename Env, typename BitSetTraits> void BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::ClearDLong(Env env, BitSetShortLongRep& bs) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); for (unsigned i = 0; i < len; i++) { bs[i] = 0; } } template <typename Env, typename BitSetTraits> BitSetShortLongRep BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::MakeUninitArrayBits(Env env) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); assert(len > 1); // Or else would not require an array. return (BitSetShortLongRep)(BitSetTraits::Alloc(env, len * sizeof(size_t))); } template <typename Env, typename BitSetTraits> BitSetShortLongRep BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::MakeEmptyArrayBits(Env env) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); assert(len > 1); // Or else would not require an array. 
BitSetShortLongRep res = (BitSetShortLongRep)(BitSetTraits::Alloc(env, len * sizeof(size_t))); for (unsigned i = 0; i < len; i++) { res[i] = 0; } return res; } template <typename Env, typename BitSetTraits> BitSetShortLongRep BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::MakeFullArrayBits(Env env) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); assert(len > 1); // Or else would not require an array. BitSetShortLongRep res = (BitSetShortLongRep)(BitSetTraits::Alloc(env, len * sizeof(size_t))); for (unsigned i = 0; i < len - 1; i++) { res[i] = size_t(-1); } // Start with all ones, shift in zeros in the last elem. unsigned lastElemBits = (BitSetTraits::GetSize(env) - 1) % BitsInSizeT + 1; res[len - 1] = (size_t(-1) >> (BitsInSizeT - lastElemBits)); return res; } template <typename Env, typename BitSetTraits> bool BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::IsMemberLong(Env env, BitSetShortLongRep bs, unsigned i) { assert(!IsShort(env)); unsigned index = i / BitsInSizeT; unsigned bitInElem = (i % BitsInSizeT); size_t mask = ((size_t)1) << bitInElem; return (bs[index] & mask) != 0; } template <typename Env, typename BitSetTraits> void BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::IntersectionDLong(Env env, BitSetShortLongRep& bs1, BitSetShortLongRep bs2) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); for (unsigned i = 0; i < len; i++) { bs1[i] &= bs2[i]; } } template <typename Env, typename BitSetTraits> bool BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::IsEmptyIntersectionLong(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); for (unsigned i = 0; i < len; i++) { if ((bs1[i] & bs2[i]) != 0) { return false; } } return true; } template <typename Env, typename BitSetTraits> bool BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::IsEmptyUnionLong(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); for (unsigned i = 0; i < len; i++) { if ((bs1[i] | bs2[i]) != 0) { return false; } } return true; } template <typename Env, typename BitSetTraits> void BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::DataFlowDLong(Env env, BitSetShortLongRep& out, const BitSetShortLongRep gen, const BitSetShortLongRep in) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); for (unsigned i = 0; i < len; i++) { out[i] = out[i] & (gen[i] | in[i]); } } template <typename Env, typename BitSetTraits> void BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::LivenessDLong(Env env, BitSetShortLongRep& in, const BitSetShortLongRep def, const BitSetShortLongRep use, const BitSetShortLongRep out) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); for (unsigned i = 0; i < len; i++) { in[i] = use[i] | (out[i] & ~def[i]); } } template <typename Env, typename BitSetTraits> bool BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, 
/*BitSetTraits*/ BitSetTraits>::EqualLong(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); for (unsigned i = 0; i < len; i++) { if (bs1[i] != bs2[i]) { return false; } } return true; } template <typename Env, typename BitSetTraits> bool BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::IsSubsetLong(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); for (unsigned i = 0; i < len; i++) { if ((bs1[i] & bs2[i]) != bs1[i]) { return false; } } return true; } #ifdef DEBUG template <typename Env, typename BitSetTraits> const char* BitSetOps</*BitSetType*/ BitSetShortLongRep, /*Brand*/ BSShortLong, /*Env*/ Env, /*BitSetTraits*/ BitSetTraits>::ToStringLong(Env env, BitSetShortLongRep bs) { assert(!IsShort(env)); unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t)); const int CharsForSizeT = sizeof(size_t) * 2; unsigned allocSz = len * CharsForSizeT + 4; unsigned remaining = allocSz; char* res = (char*)BitSetTraits::DebugAlloc(env, allocSz); char* temp = res; for (unsigned i = len; 0 < i; i--) { size_t bits = bs[i - 1]; if (sizeof(size_t) == sizeof(int64_t)) { sprintf_s(temp, remaining, "%016zX", bits); temp += 16; remaining -= 16; } else { assert(sizeof(size_t) == sizeof(unsigned)); sprintf_s(temp, remaining, "%08X", (unsigned)bits); temp += 8; remaining -= 8; } } return res; } #endif #endif // bitSetAsShortLong_DEFINED
-1
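bitsetasshortlong.h (shown twice above as identical before/after content) stores a set directly in one pointer-sized word when it fits, and in an allocated size_t array otherwise. One detail worth calling out is MakeFull's special case: the all-ones "short" value cannot be built by shifting when the bit count equals the word width. Below is a minimal standalone sketch of just that trick, assuming nothing else from the JIT sources.

#include <cassert>
#include <cstddef>
#include <cstdint>

// Minimal sketch (not the JIT's BitSetOps) of the "short" representation trick: a set
// of up to BitsInSizeT bits lives directly in one size_t. Building the all-ones value
// must special-case numBits == BitsInSizeT, because shifting a size_t left by its full
// width is undefined behavior.
constexpr unsigned BitsInSizeT = sizeof(std::size_t) * 8;

std::size_t MakeFullShort(unsigned numBits)
{
    assert(numBits <= BitsInSizeT);
    if (numBits == BitsInSizeT)
        return static_cast<std::size_t>(-1);              // all ones
    return (static_cast<std::size_t>(1) << numBits) - 1;  // low numBits bits set
}

int main()
{
    assert(MakeFullShort(3) == 0x7);
    assert(MakeFullShort(BitsInSizeT) == SIZE_MAX);
    return 0;
}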
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/native/libs/System.Security.Cryptography.Native/pal_evp_pkey_dsa.h
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

#include "pal_types.h"
#include "pal_compiler.h"
#include "opensslshim.h"

/*
Shims the EVP_PKEY_get1_DSA method.

Returns the DSA instance for the EVP_PKEY.
*/
PALEXPORT DSA* CryptoNative_EvpPkeyGetDsa(EVP_PKEY* pkey);

/*
Shims the EVP_PKEY_set1_DSA method to set the DSA instance on the EVP_KEY.

Returns 1 upon success, otherwise 0.
*/
PALEXPORT int32_t CryptoNative_EvpPkeySetDsa(EVP_PKEY* pkey, DSA* dsa);
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

#include "pal_types.h"
#include "pal_compiler.h"
#include "opensslshim.h"

/*
Shims the EVP_PKEY_get1_DSA method.

Returns the DSA instance for the EVP_PKEY.
*/
PALEXPORT DSA* CryptoNative_EvpPkeyGetDsa(EVP_PKEY* pkey);

/*
Shims the EVP_PKEY_set1_DSA method to set the DSA instance on the EVP_KEY.

Returns 1 upon success, otherwise 0.
*/
PALEXPORT int32_t CryptoNative_EvpPkeySetDsa(EVP_PKEY* pkey, DSA* dsa);
-1
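pal_evp_pkey_dsa.h declares thin shims over OpenSSL's EVP_PKEY_get1_DSA and EVP_PKEY_set1_DSA. As a hedged reminder of the contract those underlying calls follow (OpenSSL 1.x-style API; CopyDsaBetweenPkeys and its arguments are invented for illustration), the "1" in get1/set1 means reference counts are bumped, so the caller still releases every pointer it holds.

#include <openssl/evp.h>
#include <openssl/dsa.h>

// Hedged sketch of the reference-counting contract the shims wrap.
int CopyDsaBetweenPkeys(EVP_PKEY* src, EVP_PKEY* dst)
{
    DSA* dsa = EVP_PKEY_get1_DSA(src);    // +1 ref on the DSA, or NULL if src is not a DSA key
    if (dsa == NULL)
        return 0;

    int ok = EVP_PKEY_set1_DSA(dst, dsa); // dst takes its own reference on success
    DSA_free(dsa);                        // release the reference get1 handed us
    return ok;                            // 1 on success, 0 on failure, matching the shim
}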
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/mono/mono/sgen/sgen-thread-pool.h
/** * \file * Threadpool for all concurrent GC work. * * Copyright (C) 2015 Xamarin Inc * * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #ifndef __MONO_SGEN_THREAD_POOL_H__ #define __MONO_SGEN_THREAD_POOL_H__ #include "mono/sgen/sgen-pointer-queue.h" #include "mono/utils/mono-threads.h" #define SGEN_THREADPOOL_MAX_NUM_THREADS 8 #define SGEN_THREADPOOL_MAX_NUM_CONTEXTS 3 typedef struct _SgenThreadPoolJob SgenThreadPoolJob; typedef struct _SgenThreadPoolContext SgenThreadPoolContext; typedef void (*SgenThreadPoolJobFunc) (void *thread_data, SgenThreadPoolJob *job); typedef void (*SgenThreadPoolThreadInitFunc) (void*); typedef void (*SgenThreadPoolIdleJobFunc) (void*); typedef gboolean (*SgenThreadPoolContinueIdleJobFunc) (void*, int); typedef gboolean (*SgenThreadPoolShouldWorkFunc) (void*); typedef gboolean (*SgenThreadPoolContinueIdleWaitFunc) (int, int*); struct _SgenThreadPoolJob { const char *name; SgenThreadPoolJobFunc func; size_t size; volatile gint32 state; }; struct _SgenThreadPoolContext { /* Only accessed with the lock held. */ SgenPointerQueue job_queue; /* * LOCKING: Assumes the GC lock is held. */ void **deferred_jobs; int deferred_jobs_len; int deferred_jobs_count; SgenThreadPoolThreadInitFunc thread_init_func; SgenThreadPoolIdleJobFunc idle_job_func; SgenThreadPoolContinueIdleJobFunc continue_idle_job_func; SgenThreadPoolShouldWorkFunc should_work_func; void **thread_datas; int num_threads; }; int sgen_thread_pool_create_context (int num_threads, SgenThreadPoolThreadInitFunc init_func, SgenThreadPoolIdleJobFunc idle_func, SgenThreadPoolContinueIdleJobFunc continue_idle_func, SgenThreadPoolShouldWorkFunc should_work_func, void **thread_datas); void sgen_thread_pool_start (void); void sgen_thread_pool_shutdown (void); SgenThreadPoolJob* sgen_thread_pool_job_alloc (const char *name, SgenThreadPoolJobFunc func, size_t size); /* This only needs to be called on jobs that are not enqueued. */ void sgen_thread_pool_job_free (SgenThreadPoolJob *job); void sgen_thread_pool_job_enqueue (int context_id, SgenThreadPoolJob *job); /* * LOCKING: Assumes the GC lock is held. */ void sgen_thread_pool_job_enqueue_deferred (int context_id, SgenThreadPoolJob *job); /* * LOCKING: Assumes the GC lock is held. */ void sgen_thread_pool_flush_deferred_jobs (int context_id, gboolean signal); gboolean sgen_thread_pool_have_deferred_jobs (int context_id); /* This must only be called after the job has been enqueued. */ void sgen_thread_pool_job_wait (int context_id, SgenThreadPoolJob *job); void sgen_thread_pool_idle_signal (int context_id); void sgen_thread_pool_idle_wait (int context_id, SgenThreadPoolContinueIdleWaitFunc continue_wait); void sgen_thread_pool_wait_for_all_jobs (int context_id); int sgen_thread_pool_is_thread_pool_thread (MonoNativeThreadId thread); #endif
/** * \file * Threadpool for all concurrent GC work. * * Copyright (C) 2015 Xamarin Inc * * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #ifndef __MONO_SGEN_THREAD_POOL_H__ #define __MONO_SGEN_THREAD_POOL_H__ #include "mono/sgen/sgen-pointer-queue.h" #include "mono/utils/mono-threads.h" #define SGEN_THREADPOOL_MAX_NUM_THREADS 8 #define SGEN_THREADPOOL_MAX_NUM_CONTEXTS 3 typedef struct _SgenThreadPoolJob SgenThreadPoolJob; typedef struct _SgenThreadPoolContext SgenThreadPoolContext; typedef void (*SgenThreadPoolJobFunc) (void *thread_data, SgenThreadPoolJob *job); typedef void (*SgenThreadPoolThreadInitFunc) (void*); typedef void (*SgenThreadPoolIdleJobFunc) (void*); typedef gboolean (*SgenThreadPoolContinueIdleJobFunc) (void*, int); typedef gboolean (*SgenThreadPoolShouldWorkFunc) (void*); typedef gboolean (*SgenThreadPoolContinueIdleWaitFunc) (int, int*); struct _SgenThreadPoolJob { const char *name; SgenThreadPoolJobFunc func; size_t size; volatile gint32 state; }; struct _SgenThreadPoolContext { /* Only accessed with the lock held. */ SgenPointerQueue job_queue; /* * LOCKING: Assumes the GC lock is held. */ void **deferred_jobs; int deferred_jobs_len; int deferred_jobs_count; SgenThreadPoolThreadInitFunc thread_init_func; SgenThreadPoolIdleJobFunc idle_job_func; SgenThreadPoolContinueIdleJobFunc continue_idle_job_func; SgenThreadPoolShouldWorkFunc should_work_func; void **thread_datas; int num_threads; }; int sgen_thread_pool_create_context (int num_threads, SgenThreadPoolThreadInitFunc init_func, SgenThreadPoolIdleJobFunc idle_func, SgenThreadPoolContinueIdleJobFunc continue_idle_func, SgenThreadPoolShouldWorkFunc should_work_func, void **thread_datas); void sgen_thread_pool_start (void); void sgen_thread_pool_shutdown (void); SgenThreadPoolJob* sgen_thread_pool_job_alloc (const char *name, SgenThreadPoolJobFunc func, size_t size); /* This only needs to be called on jobs that are not enqueued. */ void sgen_thread_pool_job_free (SgenThreadPoolJob *job); void sgen_thread_pool_job_enqueue (int context_id, SgenThreadPoolJob *job); /* * LOCKING: Assumes the GC lock is held. */ void sgen_thread_pool_job_enqueue_deferred (int context_id, SgenThreadPoolJob *job); /* * LOCKING: Assumes the GC lock is held. */ void sgen_thread_pool_flush_deferred_jobs (int context_id, gboolean signal); gboolean sgen_thread_pool_have_deferred_jobs (int context_id); /* This must only be called after the job has been enqueued. */ void sgen_thread_pool_job_wait (int context_id, SgenThreadPoolJob *job); void sgen_thread_pool_idle_signal (int context_id); void sgen_thread_pool_idle_wait (int context_id, SgenThreadPoolContinueIdleWaitFunc continue_wait); void sgen_thread_pool_wait_for_all_jobs (int context_id); int sgen_thread_pool_is_thread_pool_thread (MonoNativeThreadId thread); #endif
-1
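sgen-thread-pool.h only declares the API, so the following is a hypothetical usage sketch built from those declarations alone; ScanJob, scan_range_func, and the context id are invented. The size passed to sgen_thread_pool_job_alloc is the full allocation size, which is how a job can carry extra payload after the SgenThreadPoolJob header.

#include "mono/sgen/sgen-thread-pool.h"

typedef struct {
	SgenThreadPoolJob job;   /* must stay first so the two pointer types are interchangeable */
	char *start;
	char *end;
} ScanJob;

static void
scan_range_func (void *thread_data, SgenThreadPoolJob *job)
{
	ScanJob *scan = (ScanJob *)job;
	/* ... scan the [scan->start, scan->end) range on a worker thread ... */
	(void)thread_data;
	(void)scan;
}

static void
enqueue_scan (int context_id, char *start, char *end)
{
	ScanJob *scan = (ScanJob *)sgen_thread_pool_job_alloc ("scan range", scan_range_func, sizeof (ScanJob));
	scan->start = start;
	scan->end = end;
	sgen_thread_pool_job_enqueue (context_id, &scan->job);
	/* Waiting is only legal after the enqueue, per the comment in the header. */
	sgen_thread_pool_job_wait (context_id, &scan->job);
}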
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/native/libs/System.Security.Cryptography.Native/pal_evp_pkey_eckey.h
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

#include "pal_types.h"
#include "pal_compiler.h"
#include "opensslshim.h"

/*
Shims the EVP_PKEY_get1_EC_KEY method.

Returns the EC_KEY instance for the EVP_PKEY.
*/
PALEXPORT EC_KEY* CryptoNative_EvpPkeyGetEcKey(EVP_PKEY* pkey);

/*
Shims the EVP_PKEY_set1_EC_KEY method to set the EC_KEY instance on the EVP_KEY.

Returns 1 upon success, otherwise 0.
*/
PALEXPORT int32_t CryptoNative_EvpPkeySetEcKey(EVP_PKEY* pkey, EC_KEY* key);
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

#include "pal_types.h"
#include "pal_compiler.h"
#include "opensslshim.h"

/*
Shims the EVP_PKEY_get1_EC_KEY method.

Returns the EC_KEY instance for the EVP_PKEY.
*/
PALEXPORT EC_KEY* CryptoNative_EvpPkeyGetEcKey(EVP_PKEY* pkey);

/*
Shims the EVP_PKEY_set1_EC_KEY method to set the EC_KEY instance on the EVP_KEY.

Returns 1 upon success, otherwise 0.
*/
PALEXPORT int32_t CryptoNative_EvpPkeySetEcKey(EVP_PKEY* pkey, EC_KEY* key);
-1
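pal_evp_pkey_eckey.h is the EC_KEY counterpart of the DSA shim above. Complementing the earlier sketch, this one shows the other half a caller typically needs: creating the low-level key before handing it to an EVP_PKEY. It is a hedged example using plain OpenSSL 1.x calls; MakeP256Pkey and the P-256 curve choice are arbitrary.

#include <openssl/evp.h>
#include <openssl/ec.h>
#include <openssl/obj_mac.h>

// Hedged sketch: generate a P-256 key pair, attach it to an EVP_PKEY, drop the local ref.
EVP_PKEY* MakeP256Pkey(void)
{
    EC_KEY* ec = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1);
    if (ec == NULL || EC_KEY_generate_key(ec) != 1)
    {
        EC_KEY_free(ec);
        return NULL;
    }

    EVP_PKEY* pkey = EVP_PKEY_new();
    if (pkey == NULL || EVP_PKEY_set1_EC_KEY(pkey, ec) != 1) // set1: pkey takes its own reference
    {
        EVP_PKEY_free(pkey);
        pkey = NULL;
    }

    EC_KEY_free(ec); // release our reference either way
    return pkey;
}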
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/coreclr/pal/tests/palsuite/c_runtime/swprintf/test3/test3.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================ ** ** Source: test3.c ** ** Purpose: Tests swprintf with wide strings ** ** **==========================================================================*/ #include <palsuite.h> #include "../swprintf.h" /* * Uses memcmp & wcslen */ PALTEST(c_runtime_swprintf_test3_paltest_swprintf_test3, "c_runtime/swprintf/test3/paltest_swprintf_test3") { if (PAL_Initialize(argc, argv) != 0) { return FAIL; } DoStrTest(convert("foo %S"), "bar", convert("foo bar")); DoStrTest(convert("foo %hS"), "bar", convert("foo bar")); DoWStrTest(convert("foo %lS"), convert("bar"), convert("foo bar")); DoWStrTest(convert("foo %wS"), convert("bar"), convert("foo bar")); DoStrTest(convert("foo %LS"), "bar", convert("foo bar")); DoStrTest(convert("foo %I64S"), "bar", convert("foo bar")); DoStrTest(convert("foo %5S"), "bar", convert("foo bar")); DoStrTest(convert("foo %.2S"), "bar", convert("foo ba")); DoStrTest(convert("foo %5.2S"),"bar", convert("foo ba")); DoStrTest(convert("foo %-5S"), "bar", convert("foo bar ")); DoStrTest(convert("foo %05S"), "bar", convert("foo 00bar")); DoStrTest(convert("foo %S"), NULL, convert("foo (null)")); DoStrTest(convert("foo %hS"), NULL, convert("foo (null)")); DoWStrTest(convert("foo %lS"), NULL, convert("foo (null)")); DoWStrTest(convert("foo %wS"), NULL, convert("foo (null)")); DoStrTest(convert("foo %LS"), NULL, convert("foo (null)")); DoStrTest(convert("foo %I64S"), NULL, convert("foo (null)")); PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================ ** ** Source: test3.c ** ** Purpose: Tests swprintf with wide strings ** ** **==========================================================================*/ #include <palsuite.h> #include "../swprintf.h" /* * Uses memcmp & wcslen */ PALTEST(c_runtime_swprintf_test3_paltest_swprintf_test3, "c_runtime/swprintf/test3/paltest_swprintf_test3") { if (PAL_Initialize(argc, argv) != 0) { return FAIL; } DoStrTest(convert("foo %S"), "bar", convert("foo bar")); DoStrTest(convert("foo %hS"), "bar", convert("foo bar")); DoWStrTest(convert("foo %lS"), convert("bar"), convert("foo bar")); DoWStrTest(convert("foo %wS"), convert("bar"), convert("foo bar")); DoStrTest(convert("foo %LS"), "bar", convert("foo bar")); DoStrTest(convert("foo %I64S"), "bar", convert("foo bar")); DoStrTest(convert("foo %5S"), "bar", convert("foo bar")); DoStrTest(convert("foo %.2S"), "bar", convert("foo ba")); DoStrTest(convert("foo %5.2S"),"bar", convert("foo ba")); DoStrTest(convert("foo %-5S"), "bar", convert("foo bar ")); DoStrTest(convert("foo %05S"), "bar", convert("foo 00bar")); DoStrTest(convert("foo %S"), NULL, convert("foo (null)")); DoStrTest(convert("foo %hS"), NULL, convert("foo (null)")); DoWStrTest(convert("foo %lS"), NULL, convert("foo (null)")); DoWStrTest(convert("foo %wS"), NULL, convert("foo (null)")); DoStrTest(convert("foo %LS"), NULL, convert("foo (null)")); DoStrTest(convert("foo %I64S"), NULL, convert("foo (null)")); PAL_Terminate(); return PASS; }
-1
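The PR description repeated in the records above concerns JIT loop cloning for `for (i = expression; ...)` loops. The sketch below is a source-level illustration of the idea only (the JIT clones its IR, not C# or C source): a guard, which now also checks the non-constant initial value against zero, selects a fast path with the per-iteration range check removed, while the original checked loop is kept as the slow path.

```cpp
// Conceptual illustration of loop cloning for "for (i = start; i < n; i++) sum += a[i];"
// where "start" is an arbitrary expression rather than a constant initializer.
int sum_clone_sketch(const int* a, int a_len, int start, int n)
{
    int sum = 0;
    if (start >= 0 && n <= a_len)            // clone guard: initial value checked against zero
    {
        for (int i = start; i < n; i++)      // fast path: per-iteration range check removed
            sum += a[i];
    }
    else
    {
        for (int i = start; i < n; i++)      // slow path: original loop with its range check
        {
            if (i < 0 || i >= a_len)
                break;                       // stands in for the range-check failure path
            sum += a[i];
        }
    }
    return sum;
}
```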
dotnet/runtime
66,257
Cloning improvements
Remove the loop cloning variable initialization condition: assume that any pre-existing initialization is acceptable, and check the condition against zero if necessary. Const inits remain as before. There are lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or a local var. Also, fix various comments that were no longer correct (e.g., the "first" block concept is gone).
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove the loop cloning variable initialization condition: assume that any pre-existing initialization is acceptable, and check the condition against zero if necessary. Const inits remain as before. There are lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or a local var. Also, fix various comments that were no longer correct (e.g., the "first" block concept is gone).
./src/coreclr/inc/caparser.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // File: caparser.h // // // // ============================================================================ #ifndef __CAPARSER_H__ #define __CAPARSER_H__ #include "stgpooli.h" class CustomAttributeParser { public: CustomAttributeParser( // Constructor for CustomAttributeParser. const void *pvBlob, // Pointer to the CustomAttribute blob. ULONG cbBlob) // Size of the CustomAttribute blob. : m_pbCur(reinterpret_cast<const BYTE*>(pvBlob)), m_pbBlob(reinterpret_cast<const BYTE*>(pvBlob)), m_cbBlob(cbBlob) { LIMITED_METHOD_CONTRACT; } private: signed __int8 GetI1() { LIMITED_METHOD_CONTRACT; signed __int8 tmp = *reinterpret_cast<const signed __int8*>(m_pbCur); m_pbCur += sizeof(signed __int8); return tmp; } unsigned __int8 GetU1() { LIMITED_METHOD_CONTRACT; unsigned __int8 tmp = *reinterpret_cast<const unsigned __int8*>(m_pbCur); m_pbCur += sizeof(unsigned __int8); return tmp; } signed __int16 GetI2() { LIMITED_METHOD_CONTRACT; signed __int16 tmp = GET_UNALIGNED_VAL16(m_pbCur); m_pbCur += sizeof(signed __int16); return tmp; } unsigned __int16 GetU2() { LIMITED_METHOD_CONTRACT; unsigned __int16 tmp = GET_UNALIGNED_VAL16(m_pbCur); m_pbCur += sizeof(unsigned __int16 ); return tmp; } signed __int32 GetI4() { LIMITED_METHOD_CONTRACT; signed __int32 tmp = GET_UNALIGNED_VAL32(m_pbCur); m_pbCur += sizeof(signed __int32 ); return tmp; } unsigned __int32 GetU4() { LIMITED_METHOD_CONTRACT; unsigned __int32 tmp = GET_UNALIGNED_VAL32(m_pbCur); m_pbCur += sizeof(unsigned __int32 ); return tmp; } signed __int64 GetI8() { LIMITED_METHOD_CONTRACT; signed __int64 tmp = GET_UNALIGNED_VAL64(m_pbCur); m_pbCur += sizeof(signed __int64 ); return tmp; } unsigned __int64 GetU8() { LIMITED_METHOD_CONTRACT; unsigned __int64 tmp = GET_UNALIGNED_VAL64(m_pbCur); m_pbCur += sizeof(unsigned __int64 ); return tmp; } public: float GetR4() { LIMITED_METHOD_CONTRACT; __int32 tmp = GET_UNALIGNED_VAL32(m_pbCur); _ASSERTE(sizeof(__int32) == sizeof(float)); m_pbCur += sizeof(float); return (float &)tmp; } double GetR8() { LIMITED_METHOD_CONTRACT; __int64 tmp = GET_UNALIGNED_VAL64(m_pbCur); _ASSERTE(sizeof(__int64) == sizeof(double)); m_pbCur += sizeof(double); return (double &)tmp; } private: unsigned __int16 GetProlog() { WRAPPER_NO_CONTRACT; unsigned __int16 val; VERIFY(SUCCEEDED(GetProlog(&val))); return val; } LPCUTF8 GetString(ULONG *pcbString) { WRAPPER_NO_CONTRACT; LPCUTF8 val; VERIFY(SUCCEEDED(GetString(&val, pcbString))); return val; } public: HRESULT GetI1(signed __int8 *pVal) { WRAPPER_NO_CONTRACT; if (BytesLeft() < (int) sizeof(signed __int8)) return META_E_CA_INVALID_BLOB; *pVal = GetI1(); return S_OK; } HRESULT GetTag(CorSerializationType *pVal) { WRAPPER_NO_CONTRACT; HRESULT hr; signed __int8 tmp; IfFailRet(GetI1(&tmp)); *pVal = (CorSerializationType)((unsigned __int8)tmp); return hr; } HRESULT GetU1(unsigned __int8 *pVal) { WRAPPER_NO_CONTRACT; if (BytesLeft() < (int) sizeof(unsigned __int8)) return META_E_CA_INVALID_BLOB; *pVal = GetU1(); return S_OK; } HRESULT GetI2(signed __int16 *pVal) { WRAPPER_NO_CONTRACT; if (BytesLeft() < (int) sizeof(signed __int16)) return META_E_CA_INVALID_BLOB; *pVal = GetI2(); return S_OK; } HRESULT GetU2(unsigned __int16 *pVal) { WRAPPER_NO_CONTRACT; if (BytesLeft() < (int) sizeof(unsigned __int16)) return META_E_CA_INVALID_BLOB; *pVal = GetU2(); return S_OK; } HRESULT GetI4(signed __int32 *pVal) { WRAPPER_NO_CONTRACT; if (BytesLeft() < (int) 
sizeof(signed __int32)) return META_E_CA_INVALID_BLOB; *pVal = GetI4(); return S_OK; } HRESULT GetU4(unsigned __int32 *pVal) { WRAPPER_NO_CONTRACT; if (BytesLeft() < (int) sizeof(unsigned __int32)) return META_E_CA_INVALID_BLOB; *pVal = GetU4(); return S_OK; } HRESULT GetI8(signed __int64 *pVal) { WRAPPER_NO_CONTRACT; if (BytesLeft() < (int) sizeof(signed __int64)) return META_E_CA_INVALID_BLOB; *pVal = GetI8(); return S_OK; } HRESULT GetU8(unsigned __int64 *pVal) { WRAPPER_NO_CONTRACT; if (BytesLeft() < (int) sizeof(unsigned __int64)) return META_E_CA_INVALID_BLOB; *pVal = GetU8(); return S_OK; } HRESULT GetR4(float *pVal) { WRAPPER_NO_CONTRACT; if (BytesLeft() < (int) sizeof(float)) return META_E_CA_INVALID_BLOB; *pVal = GetR4(); return S_OK; } HRESULT GetR8(double *pVal) { WRAPPER_NO_CONTRACT; if (BytesLeft() < (int) sizeof(double)) return META_E_CA_INVALID_BLOB; *pVal = GetR8(); return S_OK; } HRESULT GetProlog(unsigned __int16 *pVal) { WRAPPER_NO_CONTRACT; m_pbCur = m_pbBlob; if (BytesLeft() < (int)(sizeof(BYTE) * 2)) return META_E_CA_INVALID_BLOB; return GetU2(pVal); } // Added for compatibility with anyone that may emit // blobs where the prolog is the only incorrect data. HRESULT SkipProlog() { unsigned __int16 val; return GetProlog(&val); } HRESULT ValidateProlog() { HRESULT hr; unsigned __int16 val; IfFailRet(GetProlog(&val)); if (val != 0x0001) return META_E_CA_INVALID_BLOB; return hr; } // // IMPORTANT: the returned string is typically not null-terminated. // // This can return any of three distinct valid results: // - NULL string, indicated by *pszString==NULL, *pcbString==0 // - empty string, indicated by *pszString!=NULL, *pcbString==0 // - non-empty string, indicated by *pdzString!=NULL, *pcbString!=0 // If you expect non-null or non-empty strings in your usage scenario, // call the GetNonNullString and GetNonEmptyString helpers below. // HRESULT GetString(LPCUTF8 *pszString, ULONG *pcbString) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_FORBID_FAULT; HRESULT hr; if (BytesLeft() == 0) { // Need to check for NULL string sentinal (see below), // so need to have at least one byte to read. IfFailRet(META_E_CA_INVALID_BLOB); } if (*m_pbCur == 0xFF) { // 0xFF indicates the NULL string, which is semantically // different than the empty string. *pszString = NULL; *pcbString = 0; m_pbCur++; return S_OK; } // Get the length, pointer to data following the length. return GetData((BYTE const **)pszString, pcbString); } // // This can return any of two distinct valid results: // - empty string, indicated by *pszString!=NULL, *pcbString==0 // - non-empty string, indicated by *pszString!=NULL, *pcbString!=0 // If you expect non-null or non-empty strings in your usage scenario, // call the GetNonNullString and GetNonEmptyString helpers below. // HRESULT GetNonNullString(LPCUTF8 *pszString, ULONG *pcbString) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_FORBID_FAULT; HRESULT hr; IfFailRet(GetString(pszString, pcbString)); if (*pszString == NULL) { return META_E_CA_INVALID_BLOB; } return S_OK; } // // This function will only return success if the string is valid, // non-NULL and non-empty; i.e., *pszString!=NULL, *pcbString!=0 // HRESULT GetNonEmptyString(LPCUTF8 *pszString, ULONG *pcbString) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_FORBID_FAULT; HRESULT hr; IfFailRet(GetNonNullString(pszString, pcbString)); if (*pcbString == 0) { return META_E_CA_INVALID_BLOB; } return S_OK; } // IMPORTANT: do not use with string fetching - use GetString instead. 
HRESULT GetData(BYTE const **ppbData, ULONG *pcbData) { HRESULT hr; IfFailRet(CPackedLen::SafeGetData(m_pbCur, m_pbBlob + m_cbBlob, pcbData, ppbData)); // Move past the data we just recovered m_pbCur = *ppbData + *pcbData; return S_OK; } // IMPORTANT: do not use with string fetching - use GetString instead. HRESULT GetPackedValue(ULONG *pcbData) { return CPackedLen::SafeGetLength(m_pbCur, m_pbBlob + m_cbBlob, pcbData, &m_pbCur); } int BytesLeft() { LIMITED_METHOD_CONTRACT; return (int)(m_cbBlob - (m_pbCur - m_pbBlob)); } private: const BYTE *m_pbCur; const BYTE *m_pbBlob; ULONG m_cbBlob; }; #endif // __CAPARSER_H__
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // File: caparser.h // // // // ============================================================================ #ifndef __CAPARSER_H__ #define __CAPARSER_H__ #include "stgpooli.h" class CustomAttributeParser { public: CustomAttributeParser( // Constructor for CustomAttributeParser. const void *pvBlob, // Pointer to the CustomAttribute blob. ULONG cbBlob) // Size of the CustomAttribute blob. : m_pbCur(reinterpret_cast<const BYTE*>(pvBlob)), m_pbBlob(reinterpret_cast<const BYTE*>(pvBlob)), m_cbBlob(cbBlob) { LIMITED_METHOD_CONTRACT; } private: signed __int8 GetI1() { LIMITED_METHOD_CONTRACT; signed __int8 tmp = *reinterpret_cast<const signed __int8*>(m_pbCur); m_pbCur += sizeof(signed __int8); return tmp; } unsigned __int8 GetU1() { LIMITED_METHOD_CONTRACT; unsigned __int8 tmp = *reinterpret_cast<const unsigned __int8*>(m_pbCur); m_pbCur += sizeof(unsigned __int8); return tmp; } signed __int16 GetI2() { LIMITED_METHOD_CONTRACT; signed __int16 tmp = GET_UNALIGNED_VAL16(m_pbCur); m_pbCur += sizeof(signed __int16); return tmp; } unsigned __int16 GetU2() { LIMITED_METHOD_CONTRACT; unsigned __int16 tmp = GET_UNALIGNED_VAL16(m_pbCur); m_pbCur += sizeof(unsigned __int16 ); return tmp; } signed __int32 GetI4() { LIMITED_METHOD_CONTRACT; signed __int32 tmp = GET_UNALIGNED_VAL32(m_pbCur); m_pbCur += sizeof(signed __int32 ); return tmp; } unsigned __int32 GetU4() { LIMITED_METHOD_CONTRACT; unsigned __int32 tmp = GET_UNALIGNED_VAL32(m_pbCur); m_pbCur += sizeof(unsigned __int32 ); return tmp; } signed __int64 GetI8() { LIMITED_METHOD_CONTRACT; signed __int64 tmp = GET_UNALIGNED_VAL64(m_pbCur); m_pbCur += sizeof(signed __int64 ); return tmp; } unsigned __int64 GetU8() { LIMITED_METHOD_CONTRACT; unsigned __int64 tmp = GET_UNALIGNED_VAL64(m_pbCur); m_pbCur += sizeof(unsigned __int64 ); return tmp; } public: float GetR4() { LIMITED_METHOD_CONTRACT; __int32 tmp = GET_UNALIGNED_VAL32(m_pbCur); _ASSERTE(sizeof(__int32) == sizeof(float)); m_pbCur += sizeof(float); return (float &)tmp; } double GetR8() { LIMITED_METHOD_CONTRACT; __int64 tmp = GET_UNALIGNED_VAL64(m_pbCur); _ASSERTE(sizeof(__int64) == sizeof(double)); m_pbCur += sizeof(double); return (double &)tmp; } private: unsigned __int16 GetProlog() { WRAPPER_NO_CONTRACT; unsigned __int16 val; VERIFY(SUCCEEDED(GetProlog(&val))); return val; } LPCUTF8 GetString(ULONG *pcbString) { WRAPPER_NO_CONTRACT; LPCUTF8 val; VERIFY(SUCCEEDED(GetString(&val, pcbString))); return val; } public: HRESULT GetI1(signed __int8 *pVal) { WRAPPER_NO_CONTRACT; if (BytesLeft() < (int) sizeof(signed __int8)) return META_E_CA_INVALID_BLOB; *pVal = GetI1(); return S_OK; } HRESULT GetTag(CorSerializationType *pVal) { WRAPPER_NO_CONTRACT; HRESULT hr; signed __int8 tmp; IfFailRet(GetI1(&tmp)); *pVal = (CorSerializationType)((unsigned __int8)tmp); return hr; } HRESULT GetU1(unsigned __int8 *pVal) { WRAPPER_NO_CONTRACT; if (BytesLeft() < (int) sizeof(unsigned __int8)) return META_E_CA_INVALID_BLOB; *pVal = GetU1(); return S_OK; } HRESULT GetI2(signed __int16 *pVal) { WRAPPER_NO_CONTRACT; if (BytesLeft() < (int) sizeof(signed __int16)) return META_E_CA_INVALID_BLOB; *pVal = GetI2(); return S_OK; } HRESULT GetU2(unsigned __int16 *pVal) { WRAPPER_NO_CONTRACT; if (BytesLeft() < (int) sizeof(unsigned __int16)) return META_E_CA_INVALID_BLOB; *pVal = GetU2(); return S_OK; } HRESULT GetI4(signed __int32 *pVal) { WRAPPER_NO_CONTRACT; if (BytesLeft() < (int) 
sizeof(signed __int32)) return META_E_CA_INVALID_BLOB; *pVal = GetI4(); return S_OK; } HRESULT GetU4(unsigned __int32 *pVal) { WRAPPER_NO_CONTRACT; if (BytesLeft() < (int) sizeof(unsigned __int32)) return META_E_CA_INVALID_BLOB; *pVal = GetU4(); return S_OK; } HRESULT GetI8(signed __int64 *pVal) { WRAPPER_NO_CONTRACT; if (BytesLeft() < (int) sizeof(signed __int64)) return META_E_CA_INVALID_BLOB; *pVal = GetI8(); return S_OK; } HRESULT GetU8(unsigned __int64 *pVal) { WRAPPER_NO_CONTRACT; if (BytesLeft() < (int) sizeof(unsigned __int64)) return META_E_CA_INVALID_BLOB; *pVal = GetU8(); return S_OK; } HRESULT GetR4(float *pVal) { WRAPPER_NO_CONTRACT; if (BytesLeft() < (int) sizeof(float)) return META_E_CA_INVALID_BLOB; *pVal = GetR4(); return S_OK; } HRESULT GetR8(double *pVal) { WRAPPER_NO_CONTRACT; if (BytesLeft() < (int) sizeof(double)) return META_E_CA_INVALID_BLOB; *pVal = GetR8(); return S_OK; } HRESULT GetProlog(unsigned __int16 *pVal) { WRAPPER_NO_CONTRACT; m_pbCur = m_pbBlob; if (BytesLeft() < (int)(sizeof(BYTE) * 2)) return META_E_CA_INVALID_BLOB; return GetU2(pVal); } // Added for compatibility with anyone that may emit // blobs where the prolog is the only incorrect data. HRESULT SkipProlog() { unsigned __int16 val; return GetProlog(&val); } HRESULT ValidateProlog() { HRESULT hr; unsigned __int16 val; IfFailRet(GetProlog(&val)); if (val != 0x0001) return META_E_CA_INVALID_BLOB; return hr; } // // IMPORTANT: the returned string is typically not null-terminated. // // This can return any of three distinct valid results: // - NULL string, indicated by *pszString==NULL, *pcbString==0 // - empty string, indicated by *pszString!=NULL, *pcbString==0 // - non-empty string, indicated by *pdzString!=NULL, *pcbString!=0 // If you expect non-null or non-empty strings in your usage scenario, // call the GetNonNullString and GetNonEmptyString helpers below. // HRESULT GetString(LPCUTF8 *pszString, ULONG *pcbString) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_FORBID_FAULT; HRESULT hr; if (BytesLeft() == 0) { // Need to check for NULL string sentinal (see below), // so need to have at least one byte to read. IfFailRet(META_E_CA_INVALID_BLOB); } if (*m_pbCur == 0xFF) { // 0xFF indicates the NULL string, which is semantically // different than the empty string. *pszString = NULL; *pcbString = 0; m_pbCur++; return S_OK; } // Get the length, pointer to data following the length. return GetData((BYTE const **)pszString, pcbString); } // // This can return any of two distinct valid results: // - empty string, indicated by *pszString!=NULL, *pcbString==0 // - non-empty string, indicated by *pszString!=NULL, *pcbString!=0 // If you expect non-null or non-empty strings in your usage scenario, // call the GetNonNullString and GetNonEmptyString helpers below. // HRESULT GetNonNullString(LPCUTF8 *pszString, ULONG *pcbString) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_FORBID_FAULT; HRESULT hr; IfFailRet(GetString(pszString, pcbString)); if (*pszString == NULL) { return META_E_CA_INVALID_BLOB; } return S_OK; } // // This function will only return success if the string is valid, // non-NULL and non-empty; i.e., *pszString!=NULL, *pcbString!=0 // HRESULT GetNonEmptyString(LPCUTF8 *pszString, ULONG *pcbString) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_FORBID_FAULT; HRESULT hr; IfFailRet(GetNonNullString(pszString, pcbString)); if (*pcbString == 0) { return META_E_CA_INVALID_BLOB; } return S_OK; } // IMPORTANT: do not use with string fetching - use GetString instead. 
HRESULT GetData(BYTE const **ppbData, ULONG *pcbData) { HRESULT hr; IfFailRet(CPackedLen::SafeGetData(m_pbCur, m_pbBlob + m_cbBlob, pcbData, ppbData)); // Move past the data we just recovered m_pbCur = *ppbData + *pcbData; return S_OK; } // IMPORTANT: do not use with string fetching - use GetString instead. HRESULT GetPackedValue(ULONG *pcbData) { return CPackedLen::SafeGetLength(m_pbCur, m_pbBlob + m_cbBlob, pcbData, &m_pbCur); } int BytesLeft() { LIMITED_METHOD_CONTRACT; return (int)(m_cbBlob - (m_pbCur - m_pbBlob)); } private: const BYTE *m_pbCur; const BYTE *m_pbBlob; ULONG m_cbBlob; }; #endif // __CAPARSER_H__
-1
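CustomAttributeParser above consumes a custom-attribute blob strictly in order: prolog, fixed arguments, then the named-argument count. A short sketch of that call order; the blob shape (a single int32 fixed argument) and the function name are assumptions for illustration, and `IfFailRet` is the same error-propagation macro the header itself uses.

```cpp
// Sketch: validate the 0x0001 prolog, read one int32 fixed argument, then the
// count of named arguments that follows the fixed arguments.
HRESULT ParseSimpleInt32Attribute(const void* pvBlob, ULONG cbBlob)
{
    HRESULT hr;
    CustomAttributeParser ca(pvBlob, cbBlob);

    IfFailRet(ca.ValidateProlog());

    signed __int32 fixedArg;
    IfFailRet(ca.GetI4(&fixedArg));      // assumed ctor signature: one int32

    unsigned __int16 numNamed;
    IfFailRet(ca.GetU2(&numNamed));      // named arguments would be parsed next

    return S_OK;
}
```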
dotnet/runtime
66,257
Cloning improvements
Remove the loop cloning variable initialization condition: assume that any pre-existing initialization is acceptable, and check the condition against zero if necessary. Const inits remain as before. There are lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or a local var. Also, fix various comments that were no longer correct (e.g., the "first" block concept is gone).
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove the loop cloning variable initialization condition: assume that any pre-existing initialization is acceptable, and check the condition against zero if necessary. Const inits remain as before. There are lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or a local var. Also, fix various comments that were no longer correct (e.g., the "first" block concept is gone).
./src/native/eventpipe/ep-provider-internals.h
#ifndef __EVENTPIPE_PROVIDER_INTERNALS_H__ #define __EVENTPIPE_PROVIDER_INTERNALS_H__ #include "ep-rt-config.h" #ifdef ENABLE_PERFTRACING #include "ep-types.h" /* * EventPipeProvider internal library functions. */ // Set the provider configuration (enable sets of events). // _Requires_lock_held (ep) const EventPipeProviderCallbackData * provider_set_config ( EventPipeProvider *provider, int64_t keywords_for_all_sessions, EventPipeEventLevel level_for_all_sessions, uint64_t session_mask, int64_t keywords, EventPipeEventLevel level, const ep_char8_t *filter_data, EventPipeProviderCallbackData *callback_data); // Unset the provider configuration for the specified session (disable sets of events). // _Requires_lock_held (ep) const EventPipeProviderCallbackData * provider_unset_config ( EventPipeProvider *provider, int64_t keywords_for_all_sessions, EventPipeEventLevel level_for_all_sessions, uint64_t session_mask, int64_t keywords, EventPipeEventLevel level, const ep_char8_t *filter_data, EventPipeProviderCallbackData *callback_data); // _Requires_lock_not_held (ep) void provider_invoke_callback (EventPipeProviderCallbackData *provider_callback_data); // Create and register provider. // _Requires_lock_held (ep) EventPipeProvider * provider_create_register ( const ep_char8_t *provider_name, EventPipeCallback callback_func, EventPipeCallbackDataFree callback_data_free_func, void *callback_data, EventPipeProviderCallbackDataQueue *provider_callback_data_queue); // Unregister and delete provider. // _Requires_lock_held (ep) void provider_unregister_delete (EventPipeProvider *provider); // Free provider. // _Requires_lock_held (ep) void provider_free (EventPipeProvider *provider); // Add event. // _Requires_lock_held (ep) EventPipeEvent * provider_add_event ( EventPipeProvider *provider, uint32_t event_id, uint64_t keywords, uint32_t event_version, EventPipeEventLevel level, bool need_stack, const uint8_t *metadata, uint32_t metadata_len); #endif /* ENABLE_PERFTRACING */ #endif /* __EVENTPIPE_PROVIDER_INTERNALS_H__ */
#ifndef __EVENTPIPE_PROVIDER_INTERNALS_H__ #define __EVENTPIPE_PROVIDER_INTERNALS_H__ #include "ep-rt-config.h" #ifdef ENABLE_PERFTRACING #include "ep-types.h" /* * EventPipeProvider internal library functions. */ // Set the provider configuration (enable sets of events). // _Requires_lock_held (ep) const EventPipeProviderCallbackData * provider_set_config ( EventPipeProvider *provider, int64_t keywords_for_all_sessions, EventPipeEventLevel level_for_all_sessions, uint64_t session_mask, int64_t keywords, EventPipeEventLevel level, const ep_char8_t *filter_data, EventPipeProviderCallbackData *callback_data); // Unset the provider configuration for the specified session (disable sets of events). // _Requires_lock_held (ep) const EventPipeProviderCallbackData * provider_unset_config ( EventPipeProvider *provider, int64_t keywords_for_all_sessions, EventPipeEventLevel level_for_all_sessions, uint64_t session_mask, int64_t keywords, EventPipeEventLevel level, const ep_char8_t *filter_data, EventPipeProviderCallbackData *callback_data); // _Requires_lock_not_held (ep) void provider_invoke_callback (EventPipeProviderCallbackData *provider_callback_data); // Create and register provider. // _Requires_lock_held (ep) EventPipeProvider * provider_create_register ( const ep_char8_t *provider_name, EventPipeCallback callback_func, EventPipeCallbackDataFree callback_data_free_func, void *callback_data, EventPipeProviderCallbackDataQueue *provider_callback_data_queue); // Unregister and delete provider. // _Requires_lock_held (ep) void provider_unregister_delete (EventPipeProvider *provider); // Free provider. // _Requires_lock_held (ep) void provider_free (EventPipeProvider *provider); // Add event. // _Requires_lock_held (ep) EventPipeEvent * provider_add_event ( EventPipeProvider *provider, uint32_t event_id, uint64_t keywords, uint32_t event_version, EventPipeEventLevel level, bool need_stack, const uint8_t *metadata, uint32_t metadata_len); #endif /* ENABLE_PERFTRACING */ #endif /* __EVENTPIPE_PROVIDER_INTERNALS_H__ */
-1
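The provider internals above are annotated `_Requires_lock_held (ep)`, so registration and event definition happen while the caller already owns the EventPipe lock. A sketch of that sequence; the provider name, event id/keyword/version, and the `EP_EVENT_LEVEL_INFORMATIONAL` constant name are assumptions made for illustration rather than values taken from the runtime.

```cpp
// Sketch: register a provider and define one event while holding the EventPipe lock.
static EventPipeEvent *
register_sample_event (EventPipeProviderCallbackDataQueue *provider_callback_data_queue)
{
    /* Caller is assumed to already hold the EventPipe lock (see _Requires_lock_held). */
    EventPipeProvider *provider = provider_create_register (
        "MySampleProvider",                 /* hypothetical provider name */
        NULL,                               /* no enable/disable callback */
        NULL,                               /* no callback-data free function */
        NULL,                               /* no callback data */
        provider_callback_data_queue);
    if (!provider)
        return NULL;

    /* event id 1, keyword 0x1, version 1, informational, capture stacks, no metadata */
    return provider_add_event (provider, 1, 0x1, 1, EP_EVENT_LEVEL_INFORMATIONAL, true, NULL, 0);
}
```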
dotnet/runtime
66,257
Cloning improvements
Remove the loop cloning variable initialization condition: assume that any pre-existing initialization is acceptable, and check the condition against zero if necessary. Const inits remain as before. There are lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or a local var. Also, fix various comments that were no longer correct (e.g., the "first" block concept is gone).
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove the loop cloning variable initialization condition: assume that any pre-existing initialization is acceptable, and check the condition against zero if necessary. Const inits remain as before. There are lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or a local var. Also, fix various comments that were no longer correct (e.g., the "first" block concept is gone).
./src/native/corehost/ijwhost/arm64/bootstrap_thunk.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef IJW_BOOTSTRAP_THUNK_H #define IJW_BOOTSTRAP_THUNK_H #if !defined(TARGET_ARM64) #error "This file should only be included on arm64 builds." #endif #include "pal.h" #include "corhdr.h" extern "C" void start_runtime_thunk_stub(); #include <pshpack1.h> //================================================================================= class bootstrap_thunk { private: std::uint32_t m_rgCode[4]; std::uintptr_t m_pBootstrapCode; pal::dll_t m_dll; // pal::dll_t of this module std::uintptr_t *m_slot; // VTable slot for this thunk std::uint32_t m_token; // Token for this thunk public: // Get thunk from the address that the thunk code provided static bootstrap_thunk *get_thunk_from_cookie(std::uintptr_t cookie); // Get thunk from the thunk code entry point address static bootstrap_thunk *get_thunk_from_entrypoint(std::uintptr_t entryAddr); // Initializes the thunk to point to pThunkInitFcn that will load the // runtime and perform the real thunk initialization. void initialize(std::uintptr_t pThunkInitFcn, pal::dll_t dll, std::uint32_t token, std::uintptr_t *pSlot); // Returns the slot address of the vtable entry for this thunk std::uintptr_t *get_slot_address(); // Returns the pal::dll_t for this thunk's module pal::dll_t get_dll_handle(); // Returns the token of this thunk std::uint32_t get_token(); std::uintptr_t get_entrypoint(); }; #include <poppack.h> #endif
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef IJW_BOOTSTRAP_THUNK_H #define IJW_BOOTSTRAP_THUNK_H #if !defined(TARGET_ARM64) #error "This file should only be included on arm64 builds." #endif #include "pal.h" #include "corhdr.h" extern "C" void start_runtime_thunk_stub(); #include <pshpack1.h> //================================================================================= class bootstrap_thunk { private: std::uint32_t m_rgCode[4]; std::uintptr_t m_pBootstrapCode; pal::dll_t m_dll; // pal::dll_t of this module std::uintptr_t *m_slot; // VTable slot for this thunk std::uint32_t m_token; // Token for this thunk public: // Get thunk from the address that the thunk code provided static bootstrap_thunk *get_thunk_from_cookie(std::uintptr_t cookie); // Get thunk from the thunk code entry point address static bootstrap_thunk *get_thunk_from_entrypoint(std::uintptr_t entryAddr); // Initializes the thunk to point to pThunkInitFcn that will load the // runtime and perform the real thunk initialization. void initialize(std::uintptr_t pThunkInitFcn, pal::dll_t dll, std::uint32_t token, std::uintptr_t *pSlot); // Returns the slot address of the vtable entry for this thunk std::uintptr_t *get_slot_address(); // Returns the pal::dll_t for this thunk's module pal::dll_t get_dll_handle(); // Returns the token of this thunk std::uint32_t get_token(); std::uintptr_t get_entrypoint(); }; #include <poppack.h> #endif
-1
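The arm64 bootstrap thunk above carries everything needed to enter the runtime lazily for one VTable slot. A sketch of how a fixup loop might wire one slot through it; the final slot patch and the idea that the thunk memory comes from an executable-page pool are assumptions for illustration, while `initialize`, `get_entrypoint`, and `start_runtime_thunk_stub` are the declarations from the header.

```cpp
// Sketch: point a thunk at the runtime-startup stub, then make the vtable slot
// enter the thunk until the runtime later patches in the real method entry point.
void wire_up_thunk(bootstrap_thunk* thunk, pal::dll_t dll, std::uint32_t token, std::uintptr_t* slot)
{
    thunk->initialize(reinterpret_cast<std::uintptr_t>(&start_runtime_thunk_stub),
                      dll,
                      token,
                      slot);

    *slot = thunk->get_entrypoint();   // assumed slot patch; the thunk loads the runtime on first call
}
```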
dotnet/runtime
66,257
Cloning improvements
Remove the loop cloning variable initialization condition: assume that any pre-existing initialization is acceptable, and check the condition against zero if necessary. Const inits remain as before. There are lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or a local var. Also, fix various comments that were no longer correct (e.g., the "first" block concept is gone).
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove the loop cloning variable initialization condition: assume that any pre-existing initialization is acceptable, and check the condition against zero if necessary. Const inits remain as before. There are lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or a local var. Also, fix various comments that were no longer correct (e.g., the "first" block concept is gone).
./src/coreclr/debug/daccess/request.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //***************************************************************************** // File: request.cpp // // // CorDataAccess::Request implementation. // //***************************************************************************** #include "stdafx.h" #include <win32threadpool.h> #include "typestring.h" #include <gccover.h> #include <virtualcallstub.h> #ifdef FEATURE_COMINTEROP #include <comcallablewrapper.h> #endif // FEATURE_COMINTEROP #ifdef FEATURE_COMWRAPPERS #include <interoplibinterface.h> #include <interoplibabi.h> typedef DPTR(InteropLibInterface::ExternalObjectContextBase) PTR_ExternalObjectContext; typedef DPTR(InteropLib::ABI::ManagedObjectWrapperLayout) PTR_ManagedObjectWrapper; #endif // FEATURE_COMWRAPPERS #ifndef TARGET_UNIX // It is unfortunate having to include this header just to get the definition of GenericModeBlock #include <msodw.h> #endif // TARGET_UNIX // To include definiton of IsThrowableThreadAbortException #include <exstatecommon.h> #include "rejit.h" #include "request_common.h" // GC headers define these to EE-specific stuff that we don't want. #undef EnterCriticalSection #undef LeaveCriticalSection #define PTR_CDADDR(ptr) TO_CDADDR(PTR_TO_TADDR(ptr)) #define HOST_CDADDR(host) TO_CDADDR(PTR_HOST_TO_TADDR(host)) #define SOSDacEnter() \ DAC_ENTER(); \ HRESULT hr = S_OK; \ EX_TRY \ { #define SOSDacLeave() \ } \ EX_CATCH \ { \ if (!DacExceptionFilter(GET_EXCEPTION(), this, &hr)) \ { \ EX_RETHROW; \ } \ } \ EX_END_CATCH(SwallowAllExceptions) \ DAC_LEAVE(); // Use this when you don't want to instantiate an Object * in the host. TADDR DACGetMethodTableFromObjectPointer(TADDR objAddr, ICorDebugDataTarget * target) { ULONG32 returned = 0; TADDR Value = NULL; HRESULT hr = target->ReadVirtual(objAddr, (PBYTE)&Value, sizeof(TADDR), &returned); if ((hr != S_OK) || (returned != sizeof(TADDR))) { return NULL; } #if TARGET_64BIT Value = Value & ~7; // equivalent to Object::GetGCSafeMethodTable() #else Value = Value & ~3; // equivalent to Object::GetGCSafeMethodTable() #endif return Value; } // Use this when you don't want to instantiate an Object * in the host. PTR_SyncBlock DACGetSyncBlockFromObjectPointer(TADDR objAddr, ICorDebugDataTarget * target) { ULONG32 returned = 0; DWORD Value = NULL; HRESULT hr = target->ReadVirtual(objAddr - sizeof(DWORD), (PBYTE)&Value, sizeof(DWORD), &returned); if ((hr != S_OK) || (returned != sizeof(DWORD))) { return NULL; } if ((Value & (BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_IS_HASHCODE)) != BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX) return NULL; Value &= MASK_SYNCBLOCKINDEX; PTR_SyncTableEntry ste = PTR_SyncTableEntry(dac_cast<TADDR>(g_pSyncTable) + (sizeof(SyncTableEntry) * Value)); return ste->m_SyncBlock; } BOOL DacValidateEEClass(EEClass *pEEClass) { // Verify things are right. // The EEClass method table pointer should match the method table. // TODO: Microsoft, need another test for validity, this one isn't always true anymore. BOOL retval = TRUE; EX_TRY { MethodTable *pMethodTable = pEEClass->GetMethodTable(); if (!pMethodTable) { // PREfix. retval = FALSE; } else if (pEEClass != pMethodTable->GetClass()) { retval = FALSE; } } EX_CATCH { retval = FALSE; // Something is wrong } EX_END_CATCH(SwallowAllExceptions) return retval; } BOOL DacValidateMethodTable(MethodTable *pMT, BOOL &bIsFree) { // Verify things are right. 
BOOL retval = FALSE; EX_TRY { bIsFree = FALSE; if (HOST_CDADDR(pMT) == HOST_CDADDR(g_pFreeObjectMethodTable)) { bIsFree = TRUE; } else { // Standard fast check if (!pMT->ValidateWithPossibleAV()) goto BadMethodTable; // In rare cases, we've seen the standard check above pass when it shouldn't. // Insert additional/ad-hoc tests below. // Metadata token should look valid for a class mdTypeDef td = pMT->GetCl(); if (td != mdTokenNil && TypeFromToken(td) != mdtTypeDef) goto BadMethodTable; // BaseSize should always be greater than 0 for valid objects (unless it's an interface) // For strings, baseSize is not ptr-aligned if (!pMT->IsInterface() && !pMT->IsString()) { if (pMT->GetBaseSize() == 0 || !IS_ALIGNED(pMT->GetBaseSize(), sizeof(void *))) goto BadMethodTable; } } retval = TRUE; BadMethodTable: ; } EX_CATCH { retval = FALSE; // Something is wrong } EX_END_CATCH(SwallowAllExceptions) return retval; } BOOL DacValidateMD(MethodDesc * pMD) { if (pMD == NULL) { return FALSE; } // Verify things are right. BOOL retval = TRUE; EX_TRY { MethodTable *pMethodTable = pMD->GetMethodTable(); // Standard fast check if (!pMethodTable->ValidateWithPossibleAV()) { retval = FALSE; } if (retval && (pMD->GetSlot() >= pMethodTable->GetNumVtableSlots() && !pMD->HasNonVtableSlot())) { retval = FALSE; } if (retval) { MethodDesc *pMDCheck = MethodDesc::GetMethodDescFromStubAddr(pMD->GetTemporaryEntryPoint(), TRUE); if (PTR_HOST_TO_TADDR(pMD) != PTR_HOST_TO_TADDR(pMDCheck)) { retval = FALSE; } } if (retval && pMD->HasNativeCode() && !pMD->IsFCall()) { PCODE jitCodeAddr = pMD->GetNativeCode(); MethodDesc *pMDCheck = ExecutionManager::GetCodeMethodDesc(jitCodeAddr); if (pMDCheck) { // Check that the given MethodDesc matches the MethodDesc from // the CodeHeader if (PTR_HOST_TO_TADDR(pMD) != PTR_HOST_TO_TADDR(pMDCheck)) { retval = FALSE; } } else { retval = FALSE; } } } EX_CATCH { retval = FALSE; // Something is wrong } EX_END_CATCH(SwallowAllExceptions) return retval; } BOOL DacValidateMD(LPCVOID pMD) { return DacValidateMD((MethodDesc *)pMD); } VOID GetJITMethodInfo (EECodeInfo * pCodeInfo, JITTypes *pJITType, CLRDATA_ADDRESS *pGCInfo) { DWORD dwType = pCodeInfo->GetJitManager()->GetCodeType(); if (IsMiIL(dwType)) { *pJITType = TYPE_JIT; } else if (IsMiNative(dwType)) { *pJITType = TYPE_PJIT; } else { *pJITType = TYPE_UNKNOWN; } *pGCInfo = (CLRDATA_ADDRESS)PTR_TO_TADDR(pCodeInfo->GetGCInfo()); } HRESULT ClrDataAccess::GetWorkRequestData(CLRDATA_ADDRESS addr, struct DacpWorkRequestData *workRequestData) { if (addr == 0 || workRequestData == NULL) return E_INVALIDARG; SOSDacEnter(); WorkRequest *pRequest = PTR_WorkRequest(TO_TADDR(addr)); workRequestData->Function = (TADDR)(pRequest->Function); workRequestData->Context = (TADDR)(pRequest->Context); workRequestData->NextWorkRequest = (TADDR)(pRequest->next); SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetHillClimbingLogEntry(CLRDATA_ADDRESS addr, struct DacpHillClimbingLogEntry *entry) { if (addr == 0 || entry == NULL) return E_INVALIDARG; SOSDacEnter(); HillClimbingLogEntry *pLogEntry = PTR_HillClimbingLogEntry(TO_TADDR(addr)); entry->TickCount = pLogEntry->TickCount; entry->NewControlSetting = pLogEntry->NewControlSetting; entry->LastHistoryCount = pLogEntry->LastHistoryCount; entry->LastHistoryMean = pLogEntry->LastHistoryMean; entry->Transition = pLogEntry->Transition; SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetThreadpoolData(struct DacpThreadpoolData *threadpoolData) { if (threadpoolData == NULL) return E_INVALIDARG; SOSDacEnter(); 
threadpoolData->cpuUtilization = ThreadpoolMgr::cpuUtilization; threadpoolData->MinLimitTotalWorkerThreads = ThreadpoolMgr::MinLimitTotalWorkerThreads; threadpoolData->MaxLimitTotalWorkerThreads = ThreadpoolMgr::MaxLimitTotalWorkerThreads; // // Read ThreadpoolMgr::WorkerCounter // TADDR pCounter = DacGetTargetAddrForHostAddr(&ThreadpoolMgr::WorkerCounter,true); ThreadpoolMgr::ThreadCounter counter; DacReadAll(pCounter,&counter,sizeof(ThreadpoolMgr::ThreadCounter),true); ThreadpoolMgr::ThreadCounter::Counts counts = counter.counts; threadpoolData->NumWorkingWorkerThreads = counts.NumWorking; threadpoolData->NumIdleWorkerThreads = counts.NumActive - counts.NumWorking; threadpoolData->NumRetiredWorkerThreads = counts.NumRetired; threadpoolData->FirstUnmanagedWorkRequest = HOST_CDADDR(ThreadpoolMgr::WorkRequestHead); threadpoolData->HillClimbingLog = dac_cast<TADDR>(&HillClimbingLog); threadpoolData->HillClimbingLogFirstIndex = HillClimbingLogFirstIndex; threadpoolData->HillClimbingLogSize = HillClimbingLogSize; // // Read ThreadpoolMgr::CPThreadCounter // pCounter = DacGetTargetAddrForHostAddr(&ThreadpoolMgr::CPThreadCounter,true); DacReadAll(pCounter,&counter,sizeof(ThreadpoolMgr::ThreadCounter),true); counts = counter.counts; threadpoolData->NumCPThreads = (LONG)(counts.NumActive + counts.NumRetired); threadpoolData->NumFreeCPThreads = (LONG)(counts.NumActive - counts.NumWorking); threadpoolData->MaxFreeCPThreads = ThreadpoolMgr::MaxFreeCPThreads; threadpoolData->NumRetiredCPThreads = (LONG)(counts.NumRetired); threadpoolData->MaxLimitTotalCPThreads = ThreadpoolMgr::MaxLimitTotalCPThreads; threadpoolData->CurrentLimitTotalCPThreads = (LONG)(counts.NumActive); //legacy: currently has no meaning threadpoolData->MinLimitTotalCPThreads = ThreadpoolMgr::MinLimitTotalCPThreads; TADDR pEntry = DacGetTargetAddrForHostAddr(&ThreadpoolMgr::TimerQueue,true); ThreadpoolMgr::LIST_ENTRY entry; DacReadAll(pEntry,&entry,sizeof(ThreadpoolMgr::LIST_ENTRY),true); TADDR node = (TADDR) entry.Flink; threadpoolData->NumTimers = 0; while (node && node != pEntry) { threadpoolData->NumTimers++; DacReadAll(node,&entry,sizeof(ThreadpoolMgr::LIST_ENTRY),true); node = (TADDR) entry.Flink; } threadpoolData->AsyncTimerCallbackCompletionFPtr = (CLRDATA_ADDRESS) GFN_TADDR(ThreadpoolMgr__AsyncTimerCallbackCompletion); SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetThreadStoreData(struct DacpThreadStoreData *threadStoreData) { SOSDacEnter(); ThreadStore* threadStore = ThreadStore::s_pThreadStore; if (!threadStore) { hr = E_UNEXPECTED; } else { // initialize the fields of our local structure threadStoreData->threadCount = threadStore->m_ThreadCount; threadStoreData->unstartedThreadCount = threadStore->m_UnstartedThreadCount; threadStoreData->backgroundThreadCount = threadStore->m_BackgroundThreadCount; threadStoreData->pendingThreadCount = threadStore->m_PendingThreadCount; threadStoreData->deadThreadCount = threadStore->m_DeadThreadCount; threadStoreData->fHostConfig = FALSE; // identify the "important" threads threadStoreData->firstThread = HOST_CDADDR(threadStore->m_ThreadList.GetHead()); threadStoreData->finalizerThread = HOST_CDADDR(g_pFinalizerThread); threadStoreData->gcThread = HOST_CDADDR(g_pSuspensionThread); } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetStressLogAddress(CLRDATA_ADDRESS *stressLog) { if (stressLog == NULL) return E_INVALIDARG; #ifdef STRESS_LOG SOSDacEnter(); if (g_pStressLog.IsValid()) *stressLog = HOST_CDADDR(g_pStressLog); else hr = E_FAIL; SOSDacLeave(); return hr; #else return 
E_NOTIMPL; #endif // STRESS_LOG } HRESULT ClrDataAccess::GetJitManagerList(unsigned int count, struct DacpJitManagerInfo managers[], unsigned int *pNeeded) { SOSDacEnter(); if (managers) { if (count >= 1) { EEJitManager * managerPtr = ExecutionManager::GetEEJitManager(); DacpJitManagerInfo *currentPtr = &managers[0]; currentPtr->managerAddr = HOST_CDADDR(managerPtr); currentPtr->codeType = managerPtr->GetCodeType(); EEJitManager *eeJitManager = PTR_EEJitManager(PTR_HOST_TO_TADDR(managerPtr)); currentPtr->ptrHeapList = HOST_CDADDR(eeJitManager->m_pCodeHeap); } } else if (pNeeded) { *pNeeded = 1; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetMethodTableSlot(CLRDATA_ADDRESS mt, unsigned int slot, CLRDATA_ADDRESS *value) { if (mt == 0 || value == NULL) return E_INVALIDARG; SOSDacEnter(); MethodTable* mTable = PTR_MethodTable(TO_TADDR(mt)); BOOL bIsFree = FALSE; if (!DacValidateMethodTable(mTable, bIsFree)) { hr = E_INVALIDARG; } else if (slot < mTable->GetNumVtableSlots()) { // Now get the slot: *value = mTable->GetRestoredSlot(slot); } else { hr = E_INVALIDARG; MethodTable::IntroducedMethodIterator it(mTable); for (; it.IsValid() && FAILED(hr); it.Next()) { MethodDesc * pMD = it.GetMethodDesc(); if (pMD->GetSlot() == slot) { *value = pMD->GetMethodEntryPoint(); hr = S_OK; } } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetCodeHeapList(CLRDATA_ADDRESS jitManager, unsigned int count, struct DacpJitCodeHeapInfo codeHeaps[], unsigned int *pNeeded) { if (jitManager == NULL) return E_INVALIDARG; SOSDacEnter(); EEJitManager *pJitManager = PTR_EEJitManager(TO_TADDR(jitManager)); HeapList *heapList = pJitManager->m_pCodeHeap; if (codeHeaps) { unsigned int i = 0; while ((heapList != NULL) && (i < count)) { CodeHeap *codeHeap = heapList->pHeap; codeHeaps[i] = DACGetHeapInfoForCodeHeap(codeHeap); heapList = heapList->hpNext; i++; } if (pNeeded) *pNeeded = i; } else if (pNeeded) { int i = 0; while (heapList != NULL) { heapList = heapList->hpNext; i++; } *pNeeded = i; } else { hr = E_INVALIDARG; } SOSDacLeave(); return hr; } DacpJitCodeHeapInfo ClrDataAccess::DACGetHeapInfoForCodeHeap(CodeHeap *heapAddr) { DacpJitCodeHeapInfo jitCodeHeapInfo; TADDR targetVtblPtrForHeapType = VPTR_HOST_VTABLE_TO_TADDR(*(LPVOID*)heapAddr); if (targetVtblPtrForHeapType == LoaderCodeHeap::VPtrTargetVTable()) { LoaderCodeHeap *loaderCodeHeap = PTR_LoaderCodeHeap(PTR_HOST_TO_TADDR(heapAddr)); jitCodeHeapInfo.codeHeapType = CODEHEAP_LOADER; jitCodeHeapInfo.LoaderHeap = TO_CDADDR(PTR_HOST_MEMBER_TADDR(LoaderCodeHeap, loaderCodeHeap, m_LoaderHeap)); } else if (targetVtblPtrForHeapType == HostCodeHeap::VPtrTargetVTable()) { HostCodeHeap *hostCodeHeap = PTR_HostCodeHeap(PTR_HOST_TO_TADDR(heapAddr)); jitCodeHeapInfo.codeHeapType = CODEHEAP_HOST; jitCodeHeapInfo.HostData.baseAddr = PTR_CDADDR(hostCodeHeap->m_pBaseAddr); jitCodeHeapInfo.HostData.currentAddr = PTR_CDADDR(hostCodeHeap->m_pLastAvailableCommittedAddr); } else { jitCodeHeapInfo.codeHeapType = CODEHEAP_UNKNOWN; } return jitCodeHeapInfo; } HRESULT ClrDataAccess::GetStackLimits(CLRDATA_ADDRESS threadPtr, CLRDATA_ADDRESS *lower, CLRDATA_ADDRESS *upper, CLRDATA_ADDRESS *fp) { if (threadPtr == 0 || (lower == NULL && upper == NULL && fp == NULL)) return E_INVALIDARG; SOSDacEnter(); Thread * thread = PTR_Thread(TO_TADDR(threadPtr)); if (lower) *lower = TO_CDADDR(thread->GetCachedStackBase().GetAddr()); if (upper) *upper = TO_CDADDR(thread->GetCachedStackLimit().GetAddr()); if (fp) *fp = PTR_HOST_MEMBER_TADDR(Thread, thread, m_pFrame); SOSDacLeave(); return hr; } 
HRESULT ClrDataAccess::GetRegisterName(int regNum, unsigned int count, _Inout_updates_z_(count) WCHAR *buffer, unsigned int *pNeeded) { if (!buffer && !pNeeded) return E_POINTER; #ifdef TARGET_AMD64 static const WCHAR *regs[] = { W("rax"), W("rcx"), W("rdx"), W("rbx"), W("rsp"), W("rbp"), W("rsi"), W("rdi"), W("r8"), W("r9"), W("r10"), W("r11"), W("r12"), W("r13"), W("r14"), W("r15"), }; #elif defined(TARGET_ARM) static const WCHAR *regs[] = { W("r0"), W("r1"), W("r2"), W("r3"), W("r4"), W("r5"), W("r6"), W("r7"), W("r8"), W("r9"), W("r10"), W("r11"), W("r12"), W("sp"), W("lr") }; #elif defined(TARGET_ARM64) static const WCHAR *regs[] = { W("X0"), W("X1"), W("X2"), W("X3"), W("X4"), W("X5"), W("X6"), W("X7"), W("X8"), W("X9"), W("X10"), W("X11"), W("X12"), W("X13"), W("X14"), W("X15"), W("X16"), W("X17"), W("X18"), W("X19"), W("X20"), W("X21"), W("X22"), W("X23"), W("X24"), W("X25"), W("X26"), W("X27"), W("X28"), W("Fp"), W("Lr"), W("Sp") }; #elif defined(TARGET_X86) static const WCHAR *regs[] = { W("eax"), W("ecx"), W("edx"), W("ebx"), W("esp"), W("ebp"), W("esi"), W("edi"), }; #endif // Caller frame registers are encoded as "-(reg+1)". bool callerFrame = regNum < 0; if (callerFrame) regNum = -regNum-1; if ((unsigned int)regNum >= ARRAY_SIZE(regs)) return E_UNEXPECTED; const WCHAR caller[] = W("caller."); unsigned int needed = (callerFrame?(unsigned int)wcslen(caller):0) + (unsigned int)wcslen(regs[regNum]) + 1; if (pNeeded) *pNeeded = needed; if (buffer) { _snwprintf_s(buffer, count, _TRUNCATE, W("%s%s"), callerFrame ? caller : W(""), regs[regNum]); if (count < needed) return S_FALSE; } return S_OK; } HRESULT ClrDataAccess::GetStackReferences(DWORD osThreadID, ISOSStackRefEnum **ppEnum) { if (ppEnum == NULL) return E_POINTER; SOSDacEnter(); DacStackReferenceWalker *walker = new (nothrow) DacStackReferenceWalker(this, osThreadID); if (walker == NULL) { hr = E_OUTOFMEMORY; } else { hr = walker->Init(); if (SUCCEEDED(hr)) hr = walker->QueryInterface(__uuidof(ISOSStackRefEnum), (void**)ppEnum); if (FAILED(hr)) { delete walker; *ppEnum = NULL; } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetThreadFromThinlockID(UINT thinLockId, CLRDATA_ADDRESS *pThread) { if (pThread == NULL) return E_INVALIDARG; SOSDacEnter(); Thread *thread = g_pThinLockThreadIdDispenser->IdToThread(thinLockId); *pThread = PTR_HOST_TO_TADDR(thread); SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetThreadAllocData(CLRDATA_ADDRESS addr, struct DacpAllocData *data) { if (data == NULL) return E_POINTER; SOSDacEnter(); Thread* thread = PTR_Thread(TO_TADDR(addr)); data->allocBytes = TO_CDADDR(thread->m_alloc_context.alloc_bytes); data->allocBytesLoh = TO_CDADDR(thread->m_alloc_context.alloc_bytes_uoh); SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetHeapAllocData(unsigned int count, struct DacpGenerationAllocData *data, unsigned int *pNeeded) { if (data == 0 && pNeeded == NULL) return E_INVALIDARG; SOSDacEnter(); #if defined(FEATURE_SVR_GC) if (GCHeapUtilities::IsServerHeap()) { hr = GetServerAllocData(count, data, pNeeded); } else #endif //FEATURE_SVR_GC { if (pNeeded) *pNeeded = 1; if (data && count >= 1) { DPTR(unused_generation) table = g_gcDacGlobals->generation_table; for (unsigned int i=0; i < *g_gcDacGlobals->max_gen + 2; i++) { dac_generation entry = GenerationTableIndex(table, i); data[0].allocData[i].allocBytes = (CLRDATA_ADDRESS)(ULONG_PTR) entry.allocation_context.alloc_bytes; data[0].allocData[i].allocBytesLoh = (CLRDATA_ADDRESS)(ULONG_PTR) entry.allocation_context.alloc_bytes_uoh; } } } 
SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetThreadData(CLRDATA_ADDRESS threadAddr, struct DacpThreadData *threadData) { SOSDacEnter(); // marshal the Thread object from the target Thread* thread = PTR_Thread(TO_TADDR(threadAddr)); // initialize our local copy from the marshaled target Thread instance ZeroMemory (threadData, sizeof(DacpThreadData)); threadData->corThreadId = thread->m_ThreadId; threadData->osThreadId = (DWORD)thread->m_OSThreadId; threadData->state = thread->m_State; threadData->preemptiveGCDisabled = thread->m_fPreemptiveGCDisabled; threadData->allocContextPtr = TO_CDADDR(thread->m_alloc_context.alloc_ptr); threadData->allocContextLimit = TO_CDADDR(thread->m_alloc_context.alloc_limit); threadData->fiberData = NULL; threadData->pFrame = PTR_CDADDR(thread->m_pFrame); threadData->context = PTR_CDADDR(thread->m_pDomain); threadData->domain = PTR_CDADDR(thread->m_pDomain); threadData->lockCount = (DWORD)-1; #ifndef TARGET_UNIX threadData->teb = TO_CDADDR(thread->m_pTEB); #else threadData->teb = NULL; #endif threadData->lastThrownObjectHandle = TO_CDADDR(thread->m_LastThrownObjectHandle); threadData->nextThread = HOST_CDADDR(ThreadStore::s_pThreadStore->m_ThreadList.GetNext(thread)); #ifdef FEATURE_EH_FUNCLETS if (thread->m_ExceptionState.m_pCurrentTracker) { threadData->firstNestedException = PTR_HOST_TO_TADDR( thread->m_ExceptionState.m_pCurrentTracker->m_pPrevNestedInfo); } #else threadData->firstNestedException = PTR_HOST_TO_TADDR( thread->m_ExceptionState.m_currentExInfo.m_pPrevNestedInfo); #endif // FEATURE_EH_FUNCLETS SOSDacLeave(); return hr; } #ifdef FEATURE_REJIT void CopyNativeCodeVersionToReJitData(NativeCodeVersion nativeCodeVersion, NativeCodeVersion activeCodeVersion, DacpReJitData * pReJitData) { pReJitData->rejitID = nativeCodeVersion.GetILCodeVersion().GetVersionId(); pReJitData->NativeCodeAddr = nativeCodeVersion.GetNativeCode(); if (nativeCodeVersion != activeCodeVersion) { pReJitData->flags = DacpReJitData::kReverted; } else { switch (nativeCodeVersion.GetILCodeVersion().GetRejitState()) { default: _ASSERTE(!"Unknown SharedRejitInfo state. DAC should be updated to understand this new state."); pReJitData->flags = DacpReJitData::kUnknown; break; case ILCodeVersion::kStateRequested: pReJitData->flags = DacpReJitData::kRequested; break; case ILCodeVersion::kStateActive: pReJitData->flags = DacpReJitData::kActive; break; } } } #endif // FEATURE_REJIT //--------------------------------------------------------------------------------------- // // Given a method desc addr, this loads up DacpMethodDescData and multiple DacpReJitDatas // with data on that method // // Arguments: // * methodDesc - MD to look up // * ip - IP address of interest (e.g., from an !ip2md call). This is used to ensure // the rejitted version corresponding to this IP is returned. May be NULL if you // don't care. // * methodDescData - [out] DacpMethodDescData to populate // * cRevertedRejitVersions - Number of entries allocated in rgRevertedRejitData // array // * rgRevertedRejitData - [out] Array of DacpReJitDatas to populate with rejitted // rejit version data // * pcNeededRevertedRejitData - [out] If cRevertedRejitVersions==0, the total // number of available rejit versions (including the current version) is // returned here. Else, the number of reverted rejit data actually fetched is // returned here. // // Return Value: // HRESULT indicating success or failure. 
// HRESULT ClrDataAccess::GetMethodDescData( CLRDATA_ADDRESS methodDesc, CLRDATA_ADDRESS ip, struct DacpMethodDescData *methodDescData, ULONG cRevertedRejitVersions, DacpReJitData * rgRevertedRejitData, ULONG * pcNeededRevertedRejitData) { if (methodDesc == 0) return E_INVALIDARG; if ((cRevertedRejitVersions != 0) && (rgRevertedRejitData == NULL)) { return E_INVALIDARG; } if ((rgRevertedRejitData != NULL) && (pcNeededRevertedRejitData == NULL)) { // If you're asking for reverted rejit data, you'd better ask for the number of // elements we return return E_INVALIDARG; } SOSDacEnter(); PTR_MethodDesc pMD = PTR_MethodDesc(TO_TADDR(methodDesc)); if (!DacValidateMD(pMD)) { hr = E_INVALIDARG; } else { ZeroMemory(methodDescData, sizeof(DacpMethodDescData)); if (rgRevertedRejitData != NULL) ZeroMemory(rgRevertedRejitData, sizeof(*rgRevertedRejitData) * cRevertedRejitVersions); if (pcNeededRevertedRejitData != NULL) *pcNeededRevertedRejitData = 0; NativeCodeVersion requestedNativeCodeVersion, activeNativeCodeVersion; if (ip != NULL) { requestedNativeCodeVersion = ExecutionManager::GetNativeCodeVersion(CLRDATA_ADDRESS_TO_TADDR(ip)); } else { #ifdef FEATURE_CODE_VERSIONING activeNativeCodeVersion = pMD->GetCodeVersionManager()->GetActiveILCodeVersion(pMD).GetActiveNativeCodeVersion(pMD); #else activeNativeCodeVersion = NativeCodeVersion(pMD); #endif requestedNativeCodeVersion = activeNativeCodeVersion; } methodDescData->requestedIP = ip; methodDescData->bIsDynamic = (pMD->IsLCGMethod()) ? TRUE : FALSE; methodDescData->wSlotNumber = pMD->GetSlot(); if (!requestedNativeCodeVersion.IsNull() && requestedNativeCodeVersion.GetNativeCode() != NULL) { methodDescData->bHasNativeCode = TRUE; methodDescData->NativeCodeAddr = TO_CDADDR(PCODEToPINSTR(requestedNativeCodeVersion.GetNativeCode())); } else { methodDescData->bHasNativeCode = FALSE; methodDescData->NativeCodeAddr = (CLRDATA_ADDRESS)-1; } methodDescData->AddressOfNativeCodeSlot = pMD->HasNativeCodeSlot() ? TO_CDADDR(dac_cast<TADDR>(pMD->GetAddrOfNativeCodeSlot())) : NULL; methodDescData->MDToken = pMD->GetMemberDef(); methodDescData->MethodDescPtr = methodDesc; methodDescData->MethodTablePtr = HOST_CDADDR(pMD->GetMethodTable()); methodDescData->ModulePtr = HOST_CDADDR(pMD->GetModule()); #ifdef FEATURE_REJIT // If rejit info is appropriate, get the following: // * ReJitInfo for the current, active version of the method // * ReJitInfo for the requested IP (for !ip2md and !u) // * ReJitInfos for all reverted versions of the method (up to // cRevertedRejitVersions) // // Minidumps will not have all this rejit info, and failure to get rejit info // should not be fatal. So enclose all rejit stuff in a try. 
EX_TRY { CodeVersionManager *pCodeVersionManager = pMD->GetCodeVersionManager(); // Current ReJitInfo if (activeNativeCodeVersion.IsNull()) { ILCodeVersion activeILCodeVersion = pCodeVersionManager->GetActiveILCodeVersion(pMD); activeNativeCodeVersion = activeILCodeVersion.GetActiveNativeCodeVersion(pMD); } CopyNativeCodeVersionToReJitData( activeNativeCodeVersion, activeNativeCodeVersion, &methodDescData->rejitDataCurrent); // Requested ReJitInfo _ASSERTE(methodDescData->rejitDataRequested.rejitID == 0); if (ip != NULL && !requestedNativeCodeVersion.IsNull()) { CopyNativeCodeVersionToReJitData( requestedNativeCodeVersion, activeNativeCodeVersion, &methodDescData->rejitDataRequested); } // Total number of jitted rejit versions ULONG cJittedRejitVersions; if (SUCCEEDED(ReJitManager::GetReJITIDs(pMD, 0 /* cReJitIds */, &cJittedRejitVersions, NULL /* reJitIds */))) { methodDescData->cJittedRejitVersions = cJittedRejitVersions; } // Reverted ReJitInfos if (rgRevertedRejitData == NULL) { // No reverted rejit versions will be returned, but maybe caller wants a // count of all versions if (pcNeededRevertedRejitData != NULL) { *pcNeededRevertedRejitData = methodDescData->cJittedRejitVersions; } } else { // Caller wants some reverted rejit versions. Gather reverted rejit version data to return ULONG cReJitIds; StackSArray<ReJITID> reJitIds; // Prepare array to populate with rejitids. "+ 1" because GetReJITIDs // returns all available rejitids, including the rejitid for the one non-reverted // current version. ReJITID *rgReJitIds = reJitIds.OpenRawBuffer(cRevertedRejitVersions + 1); if (rgReJitIds != NULL) { hr = ReJitManager::GetReJITIDs(pMD, cRevertedRejitVersions + 1, &cReJitIds, rgReJitIds); if (SUCCEEDED(hr)) { // Go through rejitids. For each reverted one, populate a entry in rgRevertedRejitData reJitIds.CloseRawBuffer(cReJitIds); ULONG iRejitDataReverted = 0; ILCodeVersion activeVersion = pCodeVersionManager->GetActiveILCodeVersion(pMD); for (COUNT_T i = 0; (i < cReJitIds) && (iRejitDataReverted < cRevertedRejitVersions); i++) { ILCodeVersion ilCodeVersion = pCodeVersionManager->GetILCodeVersion(pMD, reJitIds[i]); if ((ilCodeVersion.IsNull()) || (ilCodeVersion == activeVersion)) { continue; } NativeCodeVersion activeRejitChild = ilCodeVersion.GetActiveNativeCodeVersion(pMD); CopyNativeCodeVersionToReJitData( activeRejitChild, activeNativeCodeVersion, &rgRevertedRejitData[iRejitDataReverted]); iRejitDataReverted++; } // pcNeededRevertedRejitData != NULL as per condition at top of function (cuz rgRevertedRejitData != // NULL). *pcNeededRevertedRejitData = iRejitDataReverted; } } } } EX_CATCH { if (pcNeededRevertedRejitData != NULL) *pcNeededRevertedRejitData = 0; } EX_END_CATCH(SwallowAllExceptions) hr = S_OK; // Failure to get rejitids is not fatal #endif // FEATURE_REJIT #ifdef HAVE_GCCOVER if (!requestedNativeCodeVersion.IsNull()) { PTR_GCCoverageInfo gcCover = requestedNativeCodeVersion.GetGCCoverageInfo(); if (gcCover != NULL) { // In certain minidumps, we won't save the gccover information. // (it would be unwise to do so, it is heavy and not a customer scenario). 
methodDescData->GCStressCodeCopy = HOST_CDADDR(gcCover) + offsetof(GCCoverageInfo, savedCode); } } #endif // HAVE_GCCOVER // Set this above Dario since you know how to tell if dynamic if (methodDescData->bIsDynamic) { DynamicMethodDesc *pDynamicMethod = PTR_DynamicMethodDesc(TO_TADDR(methodDesc)); if (pDynamicMethod) { LCGMethodResolver *pResolver = pDynamicMethod->GetLCGMethodResolver(); if (pResolver) { OBJECTREF value = pResolver->GetManagedResolver(); if (value) { FieldDesc *pField = (&g_CoreLib)->GetField(FIELD__DYNAMICRESOLVER__DYNAMIC_METHOD); _ASSERTE(pField); value = pField->GetRefValue(value); if (value) { methodDescData->managedDynamicMethodObject = PTR_HOST_TO_TADDR(value); } } } } } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetTieredVersions( CLRDATA_ADDRESS methodDesc, int rejitId, struct DacpTieredVersionData *nativeCodeAddrs, int cNativeCodeAddrs, int *pcNativeCodeAddrs) { if (methodDesc == 0 || cNativeCodeAddrs == 0 || pcNativeCodeAddrs == NULL) { return E_INVALIDARG; } *pcNativeCodeAddrs = 0; SOSDacEnter(); #ifdef FEATURE_REJIT PTR_MethodDesc pMD = PTR_MethodDesc(TO_TADDR(methodDesc)); // If rejit info is appropriate, get the following: // * ReJitInfo for the current, active version of the method // * ReJitInfo for the requested IP (for !ip2md and !u) // * ReJitInfos for all reverted versions of the method (up to // cRevertedRejitVersions) // // Minidumps will not have all this rejit info, and failure to get rejit info // should not be fatal. So enclose all rejit stuff in a try. EX_TRY { CodeVersionManager *pCodeVersionManager = pMD->GetCodeVersionManager(); ILCodeVersion ilCodeVersion = pCodeVersionManager->GetILCodeVersion(pMD, rejitId); if (ilCodeVersion.IsNull()) { // Bad rejit ID hr = E_INVALIDARG; goto cleanup; } TADDR r2rImageBase = NULL; TADDR r2rImageEnd = NULL; { PTR_Module pModule = (PTR_Module)pMD->GetModule(); if (pModule->IsReadyToRun()) { PTR_PEImageLayout pImage = pModule->GetReadyToRunInfo()->GetImage(); r2rImageBase = dac_cast<TADDR>(pImage->GetBase()); r2rImageEnd = r2rImageBase + pImage->GetSize(); } } NativeCodeVersionCollection nativeCodeVersions = ilCodeVersion.GetNativeCodeVersions(pMD); int count = 0; for (NativeCodeVersionIterator iter = nativeCodeVersions.Begin(); iter != nativeCodeVersions.End(); iter++) { TADDR pNativeCode = PCODEToPINSTR((*iter).GetNativeCode()); nativeCodeAddrs[count].NativeCodeAddr = pNativeCode; PTR_NativeCodeVersionNode pNode = (*iter).AsNode(); nativeCodeAddrs[count].NativeCodeVersionNodePtr = TO_CDADDR(PTR_TO_TADDR(pNode)); if (r2rImageBase <= pNativeCode && pNativeCode < r2rImageEnd) { nativeCodeAddrs[count].OptimizationTier = DacpTieredVersionData::OptimizationTier_ReadyToRun; } else if (pMD->IsEligibleForTieredCompilation()) { switch ((*iter).GetOptimizationTier()) { default: nativeCodeAddrs[count].OptimizationTier = DacpTieredVersionData::OptimizationTier_Unknown; break; case NativeCodeVersion::OptimizationTier0: nativeCodeAddrs[count].OptimizationTier = DacpTieredVersionData::OptimizationTier_QuickJitted; break; case NativeCodeVersion::OptimizationTier1: nativeCodeAddrs[count].OptimizationTier = DacpTieredVersionData::OptimizationTier_OptimizedTier1; break; case NativeCodeVersion::OptimizationTier1OSR: nativeCodeAddrs[count].OptimizationTier = DacpTieredVersionData::OptimizationTier_OptimizedTier1OSR; break; case NativeCodeVersion::OptimizationTierOptimized: nativeCodeAddrs[count].OptimizationTier = DacpTieredVersionData::OptimizationTier_Optimized; break; } } else if (pMD->IsJitOptimizationDisabled()) { 
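                    // JIT optimizations are disabled for this method, so the code is
                    // reported as minimally optimized ("MinOptJitted").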
nativeCodeAddrs[count].OptimizationTier = DacpTieredVersionData::OptimizationTier_MinOptJitted; } else { nativeCodeAddrs[count].OptimizationTier = DacpTieredVersionData::OptimizationTier_Optimized; } ++count; if (count >= cNativeCodeAddrs) { hr = S_FALSE; break; } } *pcNativeCodeAddrs = count; } EX_CATCH { hr = E_FAIL; } EX_END_CATCH(SwallowAllExceptions) cleanup: ; #endif // FEATURE_REJIT SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetMethodDescTransparencyData(CLRDATA_ADDRESS methodDesc, struct DacpMethodDescTransparencyData *data) { if (methodDesc == 0 || data == NULL) return E_INVALIDARG; SOSDacEnter(); MethodDesc *pMD = PTR_MethodDesc(TO_TADDR(methodDesc)); if (!DacValidateMD(pMD)) { hr = E_INVALIDARG; } else { ZeroMemory(data, sizeof(DacpMethodDescTransparencyData)); } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetCodeHeaderData(CLRDATA_ADDRESS ip, struct DacpCodeHeaderData *codeHeaderData) { if (ip == 0 || codeHeaderData == NULL) return E_INVALIDARG; SOSDacEnter(); EECodeInfo codeInfo(TO_TADDR(ip)); if (!codeInfo.IsValid()) { // We may be able to walk stubs to find a method desc if it's not a jitted method. MethodDesc *methodDescI = MethodTable::GetMethodDescForSlotAddress(TO_TADDR(ip)); if (methodDescI == NULL) { hr = E_INVALIDARG; } else { codeHeaderData->MethodDescPtr = HOST_CDADDR(methodDescI); codeHeaderData->JITType = TYPE_UNKNOWN; codeHeaderData->GCInfo = NULL; codeHeaderData->MethodStart = NULL; codeHeaderData->MethodSize = 0; codeHeaderData->ColdRegionStart = NULL; } } else { codeHeaderData->MethodDescPtr = HOST_CDADDR(codeInfo.GetMethodDesc()); GetJITMethodInfo(&codeInfo, &codeHeaderData->JITType, &codeHeaderData->GCInfo); codeHeaderData->MethodStart = (CLRDATA_ADDRESS) codeInfo.GetStartAddress(); size_t methodSize = codeInfo.GetCodeManager()->GetFunctionSize(codeInfo.GetGCInfoToken()); _ASSERTE(FitsIn<DWORD>(methodSize)); codeHeaderData->MethodSize = static_cast<DWORD>(methodSize); IJitManager::MethodRegionInfo methodRegionInfo = {NULL, 0, NULL, 0}; codeInfo.GetMethodRegionInfo(&methodRegionInfo); codeHeaderData->HotRegionSize = (DWORD) methodRegionInfo.hotSize; codeHeaderData->ColdRegionSize = (DWORD) methodRegionInfo.coldSize; codeHeaderData->ColdRegionStart = (CLRDATA_ADDRESS) methodRegionInfo.coldStartAddress; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetMethodDescPtrFromFrame(CLRDATA_ADDRESS frameAddr, CLRDATA_ADDRESS * ppMD) { if (frameAddr == 0 || ppMD == NULL) return E_INVALIDARG; SOSDacEnter(); Frame *pFrame = PTR_Frame(TO_TADDR(frameAddr)); CLRDATA_ADDRESS methodDescAddr = HOST_CDADDR(pFrame->GetFunction()); if ((methodDescAddr == NULL) || !DacValidateMD(PTR_MethodDesc(TO_TADDR(methodDescAddr)))) { hr = E_INVALIDARG; } else { *ppMD = methodDescAddr; hr = S_OK; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetMethodDescPtrFromIP(CLRDATA_ADDRESS ip, CLRDATA_ADDRESS * ppMD) { if (ip == 0 || ppMD == NULL) return E_INVALIDARG; SOSDacEnter(); EECodeInfo codeInfo(TO_TADDR(ip)); if (!codeInfo.IsValid()) { hr = E_FAIL; } else { CLRDATA_ADDRESS pMD = HOST_CDADDR(codeInfo.GetMethodDesc()); if ((pMD == NULL) || !DacValidateMD(PTR_MethodDesc(TO_TADDR(pMD)))) { hr = E_INVALIDARG; } else { *ppMD = pMD; hr = S_OK; } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetMethodDescName(CLRDATA_ADDRESS methodDesc, unsigned int count, _Inout_updates_z_(count) WCHAR *name, unsigned int *pNeeded) { if (methodDesc == 0) return E_INVALIDARG; SOSDacEnter(); MethodDesc* pMD = PTR_MethodDesc(TO_TADDR(methodDesc)); StackSString str; EX_TRY { 
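        // First attempt: format the full method name, including namespace, generic
        // instantiation and signature.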
TypeString::AppendMethodInternal(str, pMD, TypeString::FormatSignature|TypeString::FormatNamespace|TypeString::FormatFullInst); } EX_CATCH { hr = E_FAIL; if (pMD->IsDynamicMethod()) { if (pMD->IsLCGMethod() || pMD->IsILStub()) { // In heap dumps, trying to format the signature can fail // in certain cases. str.Clear(); TypeString::AppendMethodInternal(str, pMD, TypeString::FormatNamespace|TypeString::FormatFullInst); hr = S_OK; } } else { #ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS if (MdCacheGetEEName(TO_TADDR(methodDesc), str)) { hr = S_OK; } else { #endif // FEATURE_MINIMETADATA_IN_TRIAGEDUMPS str.Clear(); Module* pModule = pMD->GetModule(); if (pModule) { WCHAR path[MAX_LONGPATH]; COUNT_T nChars = 0; if (pModule->GetPath().DacGetUnicode(ARRAY_SIZE(path), path, &nChars) && nChars > 0 && nChars <= ARRAY_SIZE(path)) { WCHAR* pFile = path + nChars - 1; while ((pFile >= path) && (*pFile != W('\\'))) { pFile--; } pFile++; if (*pFile) { str.Append(pFile); str.Append(W("!Unknown")); hr = S_OK; } } } #ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS } #endif } } EX_END_CATCH(SwallowAllExceptions) if (SUCCEEDED(hr)) { const WCHAR *val = str.GetUnicode(); if (pNeeded) *pNeeded = str.GetCount() + 1; if (name && count) { wcsncpy_s(name, count, val, _TRUNCATE); name[count-1] = 0; } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetDomainFromContext(CLRDATA_ADDRESS contextAddr, CLRDATA_ADDRESS *domain) { if (contextAddr == 0 || domain == NULL) return E_INVALIDARG; SOSDacEnter(); *domain = contextAddr; // Context is same as the AppDomain in CoreCLR SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetObjectStringData(CLRDATA_ADDRESS obj, unsigned int count, _Inout_updates_z_(count) WCHAR *stringData, unsigned int *pNeeded) { if (obj == 0) return E_INVALIDARG; if ((stringData == 0 || count <= 0) && (pNeeded == NULL)) return E_INVALIDARG; SOSDacEnter(); TADDR mtTADDR = DACGetMethodTableFromObjectPointer(TO_TADDR(obj), m_pTarget); MethodTable *mt = PTR_MethodTable(mtTADDR); // Object must be a string BOOL bFree = FALSE; if (!DacValidateMethodTable(mt, bFree)) hr = E_INVALIDARG; else if (HOST_CDADDR(mt) != HOST_CDADDR(g_pStringClass)) hr = E_INVALIDARG; if (SUCCEEDED(hr)) { PTR_StringObject str(TO_TADDR(obj)); ULONG32 needed = (ULONG32)str->GetStringLength() + 1; if (stringData && count > 0) { if (count > needed) count = needed; TADDR pszStr = TO_TADDR(obj)+offsetof(StringObject, m_FirstChar); hr = m_pTarget->ReadVirtual(pszStr, (PBYTE)stringData, count * sizeof(WCHAR), &needed); if (SUCCEEDED(hr)) stringData[count - 1] = W('\0'); else stringData[0] = W('\0'); } else { hr = E_INVALIDARG; } if (pNeeded) *pNeeded = needed; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetObjectClassName(CLRDATA_ADDRESS obj, unsigned int count, _Inout_updates_z_(count) WCHAR *className, unsigned int *pNeeded) { if (obj == 0) return E_INVALIDARG; SOSDacEnter(); // Don't turn the Object into a pointer, it is too costly on // scans of the gc heap. MethodTable *mt = NULL; TADDR mtTADDR = DACGetMethodTableFromObjectPointer(CLRDATA_ADDRESS_TO_TADDR(obj), m_pTarget); if (mtTADDR != NULL) mt = PTR_MethodTable(mtTADDR); else hr = E_INVALIDARG; BOOL bFree = FALSE; if (SUCCEEDED(hr) && !DacValidateMethodTable(mt, bFree)) hr = E_INVALIDARG; if (SUCCEEDED(hr)) { // There is a case where metadata was unloaded and the AppendType call will fail. // This is when an AppDomain has been unloaded but not yet collected. 
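        // Detect that case via the PEAssembly: once its image is gone, report the
        // fixed "<Unloaded Type>" placeholder instead of formatting the real name.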
PEAssembly *pPEAssembly = mt->GetModule()->GetPEAssembly(); if (pPEAssembly->GetPEImage() == NULL) { if (pNeeded) *pNeeded = 16; if (className) wcsncpy_s(className, count, W("<Unloaded Type>"), _TRUNCATE); } else { StackSString s; TypeString::AppendType(s, TypeHandle(mt), TypeString::FormatNamespace|TypeString::FormatFullInst); const WCHAR *val = s.GetUnicode(); if (pNeeded) *pNeeded = s.GetCount() + 1; if (className && count) { wcsncpy_s(className, count, val, _TRUNCATE); className[count-1] = 0; } } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetMethodDescFromToken(CLRDATA_ADDRESS moduleAddr, mdToken token, CLRDATA_ADDRESS *methodDesc) { if (moduleAddr == 0 || methodDesc == NULL) return E_INVALIDARG; SOSDacEnter(); Module* pModule = PTR_Module(TO_TADDR(moduleAddr)); TypeHandle th; switch (TypeFromToken(token)) { case mdtFieldDef: *methodDesc = HOST_CDADDR(pModule->LookupFieldDef(token)); break; case mdtMethodDef: *methodDesc = HOST_CDADDR(pModule->LookupMethodDef(token)); break; case mdtTypeDef: th = pModule->LookupTypeDef(token); *methodDesc = th.AsTAddr(); break; case mdtTypeRef: th = pModule->LookupTypeRef(token); *methodDesc = th.AsTAddr(); break; default: hr = E_INVALIDARG; break; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::TraverseModuleMap(ModuleMapType mmt, CLRDATA_ADDRESS moduleAddr, MODULEMAPTRAVERSE pCallback, LPVOID token) { if (moduleAddr == 0) return E_INVALIDARG; SOSDacEnter(); Module* pModule = PTR_Module(TO_TADDR(moduleAddr)); // We want to traverse these two tables, passing callback information switch (mmt) { case TYPEDEFTOMETHODTABLE: { LookupMap<PTR_MethodTable>::Iterator typeIter(&pModule->m_TypeDefToMethodTableMap); for (int i = 0; typeIter.Next(); i++) { if (typeIter.GetElement()) { MethodTable* pMT = typeIter.GetElement(); (pCallback)(i,PTR_HOST_TO_TADDR(pMT), token); } } } break; case TYPEREFTOMETHODTABLE: { LookupMap<PTR_TypeRef>::Iterator typeIter(&pModule->m_TypeRefToMethodTableMap); for (int i = 0; typeIter.Next(); i++) { if (typeIter.GetElement()) { MethodTable* pMT = TypeHandle::FromTAddr(dac_cast<TADDR>(typeIter.GetElement())).GetMethodTable(); (pCallback)(i,PTR_HOST_TO_TADDR(pMT), token); } } } break; default: hr = E_INVALIDARG; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetModule(CLRDATA_ADDRESS addr, IXCLRDataModule **mod) { if (addr == 0 || mod == NULL) return E_INVALIDARG; SOSDacEnter(); Module* pModule = PTR_Module(TO_TADDR(addr)); *mod = new ClrDataModule(this, pModule); SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetModuleData(CLRDATA_ADDRESS addr, struct DacpModuleData *ModuleData) { if (addr == 0 || ModuleData == NULL) return E_INVALIDARG; SOSDacEnter(); Module* pModule = PTR_Module(TO_TADDR(addr)); ZeroMemory(ModuleData,sizeof(DacpModuleData)); ModuleData->Address = addr; ModuleData->PEAssembly = HOST_CDADDR(pModule->GetPEAssembly()); COUNT_T metadataSize = 0; if (!pModule->GetPEAssembly()->IsDynamic()) { ModuleData->ilBase = (CLRDATA_ADDRESS)(ULONG_PTR) pModule->GetPEAssembly()->GetIJWBase(); } ModuleData->metadataStart = (CLRDATA_ADDRESS)dac_cast<TADDR>(pModule->GetPEAssembly()->GetLoadedMetadata(&metadataSize)); ModuleData->metadataSize = (SIZE_T) metadataSize; ModuleData->bIsReflection = pModule->IsReflection(); ModuleData->bIsPEFile = pModule->IsPEFile(); ModuleData->Assembly = HOST_CDADDR(pModule->GetAssembly()); ModuleData->dwModuleID = pModule->GetModuleID(); ModuleData->dwModuleIndex = pModule->GetModuleIndex().m_dwIndex; ModuleData->dwTransientFlags = pModule->m_dwTransientFlags; EX_TRY { // // In 
minidump's case, these data structure is not avaiable. // ModuleData->TypeDefToMethodTableMap = PTR_CDADDR(pModule->m_TypeDefToMethodTableMap.pTable); ModuleData->TypeRefToMethodTableMap = PTR_CDADDR(pModule->m_TypeRefToMethodTableMap.pTable); ModuleData->MethodDefToDescMap = PTR_CDADDR(pModule->m_MethodDefToDescMap.pTable); ModuleData->FieldDefToDescMap = PTR_CDADDR(pModule->m_FieldDefToDescMap.pTable); ModuleData->MemberRefToDescMap = NULL; ModuleData->FileReferencesMap = PTR_CDADDR(pModule->m_FileReferencesMap.pTable); ModuleData->ManifestModuleReferencesMap = PTR_CDADDR(pModule->m_ManifestModuleReferencesMap.pTable); } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions) SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetILForModule(CLRDATA_ADDRESS moduleAddr, DWORD rva, CLRDATA_ADDRESS *il) { if (moduleAddr == 0 || il == NULL) return E_INVALIDARG; SOSDacEnter(); Module* pModule = PTR_Module(TO_TADDR(moduleAddr)); *il = (TADDR)(CLRDATA_ADDRESS)pModule->GetIL(rva); SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetMethodTableData(CLRDATA_ADDRESS mt, struct DacpMethodTableData *MTData) { if (mt == 0 || MTData == NULL) return E_INVALIDARG; SOSDacEnter(); MethodTable* pMT = PTR_MethodTable(TO_TADDR(mt)); BOOL bIsFree = FALSE; if (!DacValidateMethodTable(pMT, bIsFree)) { hr = E_INVALIDARG; } else { ZeroMemory(MTData,sizeof(DacpMethodTableData)); MTData->BaseSize = pMT->GetBaseSize(); if(pMT->IsString()) MTData->BaseSize -= sizeof(WCHAR); MTData->ComponentSize = (DWORD)pMT->GetComponentSize(); MTData->bIsFree = bIsFree; if(!bIsFree) { MTData->Module = HOST_CDADDR(pMT->GetModule()); MTData->Class = HOST_CDADDR(pMT->GetClass()); MTData->ParentMethodTable = HOST_CDADDR(pMT->GetParentMethodTable());; MTData->wNumInterfaces = pMT->GetNumInterfaces(); MTData->wNumMethods = pMT->GetNumMethods(); MTData->wNumVtableSlots = pMT->GetNumVtableSlots(); MTData->wNumVirtuals = pMT->GetNumVirtuals(); MTData->cl = pMT->GetCl(); MTData->dwAttrClass = pMT->GetAttrClass(); MTData->bContainsPointers = pMT->ContainsPointers(); MTData->bIsShared = FALSE; MTData->bIsDynamic = pMT->IsDynamicStatics(); } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetMethodTableName(CLRDATA_ADDRESS mt, unsigned int count, _Inout_updates_z_(count) WCHAR *mtName, unsigned int *pNeeded) { if (mt == 0) return E_INVALIDARG; SOSDacEnter(); MethodTable *pMT = PTR_MethodTable(TO_TADDR(mt)); BOOL free = FALSE; if (mt == HOST_CDADDR(g_pFreeObjectMethodTable)) { if (pNeeded) *pNeeded = 5; if (mtName && count) wcsncpy_s(mtName, count, W("Free"), _TRUNCATE); } else if (!DacValidateMethodTable(pMT, free)) { hr = E_INVALIDARG; } else { // There is a case where metadata was unloaded and the AppendType call will fail. // This is when an AppDomain has been unloaded but not yet collected. 
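        // As in GetObjectClassName above, a type whose assembly image has already been
        // unloaded is reported with the fixed "<Unloaded Type>" placeholder.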
PEAssembly *pPEAssembly = pMT->GetModule()->GetPEAssembly(); if (pPEAssembly->GetPEImage() == NULL) { if (pNeeded) *pNeeded = 16; if (mtName) wcsncpy_s(mtName, count, W("<Unloaded Type>"), _TRUNCATE); } else { StackSString s; #ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS EX_TRY { #endif // FEATURE_MINIMETADATA_IN_TRIAGEDUMPS TypeString::AppendType(s, TypeHandle(pMT), TypeString::FormatNamespace|TypeString::FormatFullInst); #ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS } EX_CATCH { if (!MdCacheGetEEName(dac_cast<TADDR>(pMT), s)) { EX_RETHROW; } } EX_END_CATCH(SwallowAllExceptions) #endif // FEATURE_MINIMETADATA_IN_TRIAGEDUMPS if (s.IsEmpty()) { hr = E_OUTOFMEMORY; } else { const WCHAR *val = s.GetUnicode(); if (pNeeded) *pNeeded = s.GetCount() + 1; if (mtName && count) { wcsncpy_s(mtName, count, val, _TRUNCATE); mtName[count-1] = 0; } } } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetFieldDescData(CLRDATA_ADDRESS addr, struct DacpFieldDescData *FieldDescData) { if (addr == 0 || FieldDescData == NULL) return E_INVALIDARG; SOSDacEnter(); FieldDesc* pFieldDesc = PTR_FieldDesc(TO_TADDR(addr)); FieldDescData->Type = pFieldDesc->GetFieldType(); FieldDescData->sigType = FieldDescData->Type; EX_TRY { // minidump case, we do not have the field's type's type handle! // Strike should be able to form name based on the metadata token in // the field desc. Find type is using look up map which is huge. We cannot // drag in this data structure in minidump's case. // TypeHandle th = pFieldDesc->LookupFieldTypeHandle(); MethodTable *pMt = th.GetMethodTable(); if (pMt) { FieldDescData->MTOfType = HOST_CDADDR(th.GetMethodTable()); } else { FieldDescData->MTOfType = NULL; } } EX_CATCH { FieldDescData->MTOfType = NULL; } EX_END_CATCH(SwallowAllExceptions) // TODO: This is not currently useful, I need to get the module of the // type definition not that of the field description. // TODO: Is there an easier way to get this information? // I'm getting the typeDef of a (possibly unloaded) type. MetaSig tSig(pFieldDesc); tSig.NextArg(); SigPointer sp1 = tSig.GetArgProps(); CorElementType et; hr = sp1.GetElemType(&et); // throw away the value, we just need to walk past. if (SUCCEEDED(hr)) { if (et == ELEMENT_TYPE_CLASS || et == ELEMENT_TYPE_VALUETYPE) // any other follows token? 
{ hr = sp1.GetToken(&(FieldDescData->TokenOfType)); } else { // There is no encoded token of field type FieldDescData->TokenOfType = mdTypeDefNil; if (FieldDescData->MTOfType == NULL) { // If there is no encoded token (that is, it is primitive type) and no MethodTable for it, remember the // element_type from signature // FieldDescData->sigType = et; } } } FieldDescData->ModuleOfType = HOST_CDADDR(pFieldDesc->GetModule()); FieldDescData->mb = pFieldDesc->GetMemberDef(); FieldDescData->MTOfEnclosingClass = HOST_CDADDR(pFieldDesc->GetApproxEnclosingMethodTable()); FieldDescData->dwOffset = pFieldDesc->GetOffset(); FieldDescData->bIsThreadLocal = pFieldDesc->IsThreadStatic(); FieldDescData->bIsContextLocal = FALSE; FieldDescData->bIsStatic = pFieldDesc->IsStatic(); FieldDescData->NextField = HOST_CDADDR(PTR_FieldDesc(PTR_HOST_TO_TADDR(pFieldDesc) + sizeof(FieldDesc))); SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetMethodTableFieldData(CLRDATA_ADDRESS mt, struct DacpMethodTableFieldData *data) { if (mt == 0 || data == NULL) return E_INVALIDARG; SOSDacEnter(); MethodTable* pMT = PTR_MethodTable(TO_TADDR(mt)); BOOL bIsFree = FALSE; if (!pMT || !DacValidateMethodTable(pMT, bIsFree)) { hr = E_INVALIDARG; } else { data->wNumInstanceFields = pMT->GetNumInstanceFields(); data->wNumStaticFields = pMT->GetNumStaticFields(); data->wNumThreadStaticFields = pMT->GetNumThreadStaticFields(); data->FirstField = PTR_TO_TADDR(pMT->GetClass()->GetFieldDescList()); data->wContextStaticsSize = 0; data->wContextStaticOffset = 0; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetMethodTableCollectibleData(CLRDATA_ADDRESS mt, struct DacpMethodTableCollectibleData *data) { if (mt == 0 || data == NULL) return E_INVALIDARG; SOSDacEnter(); MethodTable* pMT = PTR_MethodTable(TO_TADDR(mt)); BOOL bIsFree = FALSE; if (!pMT || !DacValidateMethodTable(pMT, bIsFree)) { hr = E_INVALIDARG; } else { data->bCollectible = pMT->Collectible(); if (data->bCollectible) { data->LoaderAllocatorObjectHandle = pMT->GetLoaderAllocatorObjectHandle(); } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetMethodTableTransparencyData(CLRDATA_ADDRESS mt, struct DacpMethodTableTransparencyData *pTransparencyData) { if (mt == 0 || pTransparencyData == NULL) return E_INVALIDARG; SOSDacEnter(); MethodTable *pMT = PTR_MethodTable(TO_TADDR(mt)); BOOL bIsFree = FALSE; if (!DacValidateMethodTable(pMT, bIsFree)) { hr = E_INVALIDARG; } else { ZeroMemory(pTransparencyData, sizeof(DacpMethodTableTransparencyData)); } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetMethodTableForEEClass(CLRDATA_ADDRESS eeClass, CLRDATA_ADDRESS *value) { if (eeClass == 0 || value == NULL) return E_INVALIDARG; SOSDacEnter(); EEClass * pClass = PTR_EEClass(TO_TADDR(eeClass)); if (!DacValidateEEClass(pClass)) { hr = E_INVALIDARG; } else { *value = HOST_CDADDR(pClass->GetMethodTable()); } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetFrameName(CLRDATA_ADDRESS vtable, unsigned int count, _Inout_updates_z_(count) WCHAR *frameName, unsigned int *pNeeded) { if (vtable == 0) return E_INVALIDARG; SOSDacEnter(); PWSTR pszName = DacGetVtNameW(CLRDATA_ADDRESS_TO_TADDR(vtable)); if (pszName == NULL) { hr = E_INVALIDARG; } else { // Turn from bytes to wide characters unsigned int len = (unsigned int)wcslen(pszName); if (frameName) { wcsncpy_s(frameName, count, pszName, _TRUNCATE); if (pNeeded) { if (count < len) *pNeeded = count - 1; else *pNeeded = len; } } else if (pNeeded) { *pNeeded = len + 1; } } SOSDacLeave(); return hr; } HRESULT 
ClrDataAccess::GetPEFileName(CLRDATA_ADDRESS addr, unsigned int count, _Inout_updates_z_(count) WCHAR *fileName, unsigned int *pNeeded) { if (addr == 0 || (fileName == NULL && pNeeded == NULL) || (fileName != NULL && count == 0)) return E_INVALIDARG; SOSDacEnter(); PEAssembly* pPEAssembly = PTR_PEAssembly(TO_TADDR(addr)); // Turn from bytes to wide characters if (!pPEAssembly->GetPath().IsEmpty()) { if (!pPEAssembly->GetPath().DacGetUnicode(count, fileName, pNeeded)) hr = E_FAIL; } else if (!pPEAssembly->IsDynamic()) { StackSString displayName; pPEAssembly->GetDisplayName(displayName, 0); if (displayName.IsEmpty()) { if (fileName) fileName[0] = 0; if (pNeeded) *pNeeded = 1; } else { unsigned int len = displayName.GetCount()+1; if (fileName) { wcsncpy_s(fileName, count, displayName.GetUnicode(), _TRUNCATE); if (count < len) len = count; } if (pNeeded) *pNeeded = len; } } else { if (fileName && count) fileName[0] = 0; if (pNeeded) *pNeeded = 1; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetPEFileBase(CLRDATA_ADDRESS addr, CLRDATA_ADDRESS *base) { if (addr == 0 || base == NULL) return E_INVALIDARG; SOSDacEnter(); PEAssembly* pPEAssembly = PTR_PEAssembly(TO_TADDR(addr)); // More fields later? if (!pPEAssembly->IsDynamic()) *base = TO_CDADDR(pPEAssembly->GetIJWBase()); else *base = NULL; SOSDacLeave(); return hr; } DWORD DACGetNumComponents(TADDR addr, ICorDebugDataTarget* target) { // For an object pointer, this attempts to read the number of // array components. addr+=sizeof(size_t); ULONG32 returned = 0; DWORD Value = NULL; HRESULT hr = target->ReadVirtual(addr, (PBYTE)&Value, sizeof(DWORD), &returned); if ((hr != S_OK) || (returned != sizeof(DWORD))) { return 0; } return Value; } HRESULT ClrDataAccess::GetObjectData(CLRDATA_ADDRESS addr, struct DacpObjectData *objectData) { if (addr == 0 || objectData == NULL) return E_INVALIDARG; SOSDacEnter(); ZeroMemory (objectData, sizeof(DacpObjectData)); TADDR mtTADDR = DACGetMethodTableFromObjectPointer(CLRDATA_ADDRESS_TO_TADDR(addr),m_pTarget); if (mtTADDR==NULL) hr = E_INVALIDARG; BOOL bFree = FALSE; MethodTable *mt = NULL; if (SUCCEEDED(hr)) { mt = PTR_MethodTable(mtTADDR); if (!DacValidateMethodTable(mt, bFree)) hr = E_INVALIDARG; } if (SUCCEEDED(hr)) { objectData->MethodTable = HOST_CDADDR(mt); objectData->Size = mt->GetBaseSize(); if (mt->GetComponentSize()) { objectData->Size += (DACGetNumComponents(CLRDATA_ADDRESS_TO_TADDR(addr),m_pTarget) * mt->GetComponentSize()); objectData->dwComponentSize = mt->GetComponentSize(); } if (bFree) { objectData->ObjectType = OBJ_FREE; } else { if (objectData->MethodTable == HOST_CDADDR(g_pStringClass)) { objectData->ObjectType = OBJ_STRING; } else if (objectData->MethodTable == HOST_CDADDR(g_pObjectClass)) { objectData->ObjectType = OBJ_OBJECT; } else if (mt->IsArray()) { objectData->ObjectType = OBJ_ARRAY; // For now, go ahead and instantiate array classes. // TODO: avoid instantiating even object Arrays in the host. // NOTE: This code is carefully written to deal with MethodTable fields // in the array object having the mark bit set (because we may // be in mark phase when this function is called). 
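            // Walk down to the innermost (non-array) element type so that its
            // MethodTable can be validated before the array details are reported.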
ArrayBase *pArrayObj = PTR_ArrayBase(TO_TADDR(addr)); objectData->ElementType = mt->GetArrayElementType(); TypeHandle thElem = mt->GetArrayElementTypeHandle(); TypeHandle thCur = thElem; while (thCur.IsArray()) thCur = thCur.GetArrayElementTypeHandle(); TADDR mtCurTADDR = thCur.AsTAddr(); if (!DacValidateMethodTable(PTR_MethodTable(mtCurTADDR), bFree)) { hr = E_INVALIDARG; } else { objectData->ElementTypeHandle = (CLRDATA_ADDRESS)(thElem.AsTAddr()); objectData->dwRank = mt->GetRank(); objectData->dwNumComponents = pArrayObj->GetNumComponents (); objectData->ArrayDataPtr = PTR_CDADDR(pArrayObj->GetDataPtr (TRUE)); objectData->ArrayBoundsPtr = HOST_CDADDR(pArrayObj->GetBoundsPtr()); objectData->ArrayLowerBoundsPtr = HOST_CDADDR(pArrayObj->GetLowerBoundsPtr()); } } else { objectData->ObjectType = OBJ_OTHER; } } } #ifdef FEATURE_COMINTEROP if (SUCCEEDED(hr)) { EX_TRY_ALLOW_DATATARGET_MISSING_MEMORY { PTR_SyncBlock pSyncBlk = DACGetSyncBlockFromObjectPointer(CLRDATA_ADDRESS_TO_TADDR(addr), m_pTarget); if (pSyncBlk != NULL) { // see if we have an RCW and/or CCW associated with this object PTR_InteropSyncBlockInfo pInfo = pSyncBlk->GetInteropInfoNoCreate(); if (pInfo != NULL) { objectData->RCW = TO_CDADDR(pInfo->DacGetRawRCW()); objectData->CCW = HOST_CDADDR(pInfo->GetCCW()); } } } EX_END_CATCH_ALLOW_DATATARGET_MISSING_MEMORY; } #endif // FEATURE_COMINTEROP SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetAppDomainList(unsigned int count, CLRDATA_ADDRESS values[], unsigned int *fetched) { SOSDacEnter(); AppDomainIterator ai(FALSE); unsigned int i = 0; while (ai.Next() && (i < count)) { if (values) values[i] = HOST_CDADDR(ai.GetDomain()); i++; } if (fetched) *fetched = i; SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetAppDomainStoreData(struct DacpAppDomainStoreData *adsData) { SOSDacEnter(); adsData->systemDomain = HOST_CDADDR(SystemDomain::System()); adsData->sharedDomain = NULL; // Get an accurate count of appdomains. adsData->DomainCount = 0; AppDomainIterator ai(FALSE); while (ai.Next()) adsData->DomainCount++; SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetAppDomainData(CLRDATA_ADDRESS addr, struct DacpAppDomainData *appdomainData) { SOSDacEnter(); if (addr == 0) { hr = E_INVALIDARG; } else { PTR_BaseDomain pBaseDomain = PTR_BaseDomain(TO_TADDR(addr)); ZeroMemory(appdomainData, sizeof(DacpAppDomainData)); appdomainData->AppDomainPtr = PTR_CDADDR(pBaseDomain); PTR_LoaderAllocator pLoaderAllocator = pBaseDomain->GetLoaderAllocator(); appdomainData->pHighFrequencyHeap = HOST_CDADDR(pLoaderAllocator->GetHighFrequencyHeap()); appdomainData->pLowFrequencyHeap = HOST_CDADDR(pLoaderAllocator->GetLowFrequencyHeap()); appdomainData->pStubHeap = HOST_CDADDR(pLoaderAllocator->GetStubHeap()); appdomainData->appDomainStage = STAGE_OPEN; if (pBaseDomain->IsAppDomain()) { AppDomain * pAppDomain = pBaseDomain->AsAppDomain(); appdomainData->DomainLocalBlock = 0; appdomainData->pDomainLocalModules = 0; appdomainData->dwId = DefaultADID; appdomainData->appDomainStage = (DacpAppDomainDataStage)pAppDomain->m_Stage.Load(); if (pAppDomain->IsActive()) { // The assembly list is not valid in a closed appdomain. 
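                // Count the assemblies that finished loading, then the assemblies that
                // failed to load.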
AppDomain::AssemblyIterator i = pAppDomain->IterateAssembliesEx((AssemblyIterationFlags)( kIncludeLoading | kIncludeLoaded | kIncludeExecution)); CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly; while (i.Next(pDomainAssembly.This())) { if (pDomainAssembly->IsLoaded()) { appdomainData->AssemblyCount++; } } AppDomain::FailedAssemblyIterator j = pAppDomain->IterateFailedAssembliesEx(); while (j.Next()) { appdomainData->FailedAssemblyCount++; } } } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetFailedAssemblyData(CLRDATA_ADDRESS assembly, unsigned int *pContext, HRESULT *pResult) { if (assembly == NULL || (pContext == NULL && pResult == NULL)) { return E_INVALIDARG; } SOSDacEnter(); FailedAssembly* pAssembly = PTR_FailedAssembly(TO_TADDR(assembly)); if (!pAssembly) { hr = E_INVALIDARG; } else { if (pResult) *pResult = pAssembly->error; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetFailedAssemblyLocation(CLRDATA_ADDRESS assembly, unsigned int count, _Inout_updates_z_(count) WCHAR *location, unsigned int *pNeeded) { if (assembly == NULL || (location == NULL && pNeeded == NULL) || (location != NULL && count == 0)) return E_INVALIDARG; SOSDacEnter(); FailedAssembly* pAssembly = PTR_FailedAssembly(TO_TADDR(assembly)); // Turn from bytes to wide characters if (!pAssembly->location.IsEmpty()) { if (!pAssembly->location.DacGetUnicode(count, location, pNeeded)) { hr = E_FAIL; } } else { if (pNeeded) *pNeeded = 1; if (location) location[0] = 0; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetFailedAssemblyDisplayName(CLRDATA_ADDRESS assembly, unsigned int count, _Inout_updates_z_(count) WCHAR *name, unsigned int *pNeeded) { if (assembly == NULL || (name == NULL && pNeeded == NULL) || (name != NULL && count == 0)) return E_INVALIDARG; SOSDacEnter(); FailedAssembly* pAssembly = PTR_FailedAssembly(TO_TADDR(assembly)); if (!pAssembly->displayName.IsEmpty()) { if (!pAssembly->displayName.DacGetUnicode(count, name, pNeeded)) { hr = E_FAIL; } } else { if (pNeeded) *pNeeded = 1; if (name) name[0] = 0; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetAssemblyList(CLRDATA_ADDRESS addr, int count, CLRDATA_ADDRESS values[], int *pNeeded) { if (addr == NULL) return E_INVALIDARG; SOSDacEnter(); BaseDomain* pBaseDomain = PTR_BaseDomain(TO_TADDR(addr)); int n=0; if (pBaseDomain->IsAppDomain()) { AppDomain::AssemblyIterator i = pBaseDomain->AsAppDomain()->IterateAssembliesEx( (AssemblyIterationFlags)(kIncludeLoading | kIncludeLoaded | kIncludeExecution)); CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly; if (values) { while (i.Next(pDomainAssembly.This()) && (n < count)) { if (pDomainAssembly->IsLoaded()) { CollectibleAssemblyHolder<Assembly *> pAssembly = pDomainAssembly->GetAssembly(); // Note: DAC doesn't need to keep the assembly alive - see code:CollectibleAssemblyHolder#CAH_DAC values[n++] = HOST_CDADDR(pAssembly.Extract()); } } } else { while (i.Next(pDomainAssembly.This())) if (pDomainAssembly->IsLoaded()) n++; } if (pNeeded) *pNeeded = n; } else { // The only other type of BaseDomain is the SystemDomain, and we shouldn't be asking // for the assemblies in it. 
_ASSERTE(false); hr = E_INVALIDARG; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetFailedAssemblyList(CLRDATA_ADDRESS appDomain, int count, CLRDATA_ADDRESS values[], unsigned int *pNeeded) { if ((appDomain == NULL) || (values == NULL && pNeeded == NULL)) { return E_INVALIDARG; } SOSDacEnter(); AppDomain* pAppDomain = PTR_AppDomain(TO_TADDR(appDomain)); int n=0; AppDomain::FailedAssemblyIterator i = pAppDomain->IterateFailedAssembliesEx(); while (i.Next() && n<=count) { if (values) values[n] = HOST_CDADDR(i.GetFailedAssembly()); n++; } if (pNeeded) *pNeeded = n; SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetAppDomainName(CLRDATA_ADDRESS addr, unsigned int count, _Inout_updates_z_(count) WCHAR *name, unsigned int *pNeeded) { SOSDacEnter(); PTR_BaseDomain pBaseDomain = PTR_BaseDomain(TO_TADDR(addr)); if (!pBaseDomain->IsAppDomain()) { // Shared domain and SystemDomain don't have this field. if (pNeeded) *pNeeded = 1; if (name) name[0] = 0; } else { AppDomain* pAppDomain = pBaseDomain->AsAppDomain(); if (!pAppDomain->m_friendlyName.IsEmpty()) { if (!pAppDomain->m_friendlyName.DacGetUnicode(count, name, pNeeded)) { hr = E_FAIL; } } else { if (pNeeded) *pNeeded = 1; if (name) name[0] = 0; hr = S_OK; } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetApplicationBase(CLRDATA_ADDRESS appDomain, int count, _Inout_updates_z_(count) WCHAR *base, unsigned int *pNeeded) { // Method is not supported on CoreCLR return E_FAIL; } HRESULT ClrDataAccess::GetPrivateBinPaths(CLRDATA_ADDRESS appDomain, int count, _Inout_updates_z_(count) WCHAR *paths, unsigned int *pNeeded) { // Method is not supported on CoreCLR return E_FAIL; } HRESULT ClrDataAccess::GetAppDomainConfigFile(CLRDATA_ADDRESS appDomain, int count, _Inout_updates_z_(count) WCHAR *configFile, unsigned int *pNeeded) { // Method is not supported on CoreCLR return E_FAIL; } HRESULT ClrDataAccess::GetAssemblyData(CLRDATA_ADDRESS cdBaseDomainPtr, CLRDATA_ADDRESS assembly, struct DacpAssemblyData *assemblyData) { if (assembly == NULL && cdBaseDomainPtr == NULL) { return E_INVALIDARG; } SOSDacEnter(); Assembly* pAssembly = PTR_Assembly(TO_TADDR(assembly)); // Make sure conditionally-assigned fields like AssemblySecDesc, LoadContext, etc. 
are zeroed ZeroMemory(assemblyData, sizeof(DacpAssemblyData)); // If the specified BaseDomain is an AppDomain, get a pointer to it AppDomain * pDomain = NULL; if (cdBaseDomainPtr != NULL) { assemblyData->BaseDomainPtr = cdBaseDomainPtr; PTR_BaseDomain baseDomain = PTR_BaseDomain(TO_TADDR(cdBaseDomainPtr)); if( baseDomain->IsAppDomain() ) pDomain = baseDomain->AsAppDomain(); } assemblyData->AssemblyPtr = HOST_CDADDR(pAssembly); assemblyData->ClassLoader = HOST_CDADDR(pAssembly->GetLoader()); assemblyData->ParentDomain = HOST_CDADDR(pAssembly->GetDomain()); assemblyData->isDynamic = pAssembly->IsDynamic(); assemblyData->ModuleCount = 0; assemblyData->isDomainNeutral = FALSE; if (pAssembly->GetModule()) { assemblyData->ModuleCount++; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetAssemblyName(CLRDATA_ADDRESS assembly, unsigned int count, _Inout_updates_z_(count) WCHAR *name, unsigned int *pNeeded) { SOSDacEnter(); Assembly* pAssembly = PTR_Assembly(TO_TADDR(assembly)); if (name) name[0] = 0; if (!pAssembly->GetPEAssembly()->GetPath().IsEmpty()) { if (!pAssembly->GetPEAssembly()->GetPath().DacGetUnicode(count, name, pNeeded)) hr = E_FAIL; else if (name) name[count-1] = 0; } else if (!pAssembly->GetPEAssembly()->IsDynamic()) { StackSString displayName; pAssembly->GetPEAssembly()->GetDisplayName(displayName, 0); const WCHAR *val = displayName.GetUnicode(); if (pNeeded) *pNeeded = displayName.GetCount() + 1; if (name && count) { wcsncpy_s(name, count, val, _TRUNCATE); name[count-1] = 0; } } else { hr = E_FAIL; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetAssemblyLocation(CLRDATA_ADDRESS assembly, int count, _Inout_updates_z_(count) WCHAR *location, unsigned int *pNeeded) { if ((assembly == NULL) || (location == NULL && pNeeded == NULL) || (location != NULL && count == 0)) { return E_INVALIDARG; } SOSDacEnter(); Assembly* pAssembly = PTR_Assembly(TO_TADDR(assembly)); // Turn from bytes to wide characters if (!pAssembly->GetPEAssembly()->GetPath().IsEmpty()) { if (!pAssembly->GetPEAssembly()->GetPath(). DacGetUnicode(count, location, pNeeded)) { hr = E_FAIL; } } else { if (location) location[0] = 0; if (pNeeded) *pNeeded = 1; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetAssemblyModuleList(CLRDATA_ADDRESS assembly, unsigned int count, CLRDATA_ADDRESS modules[], unsigned int *pNeeded) { if (assembly == 0) return E_INVALIDARG; SOSDacEnter(); Assembly* pAssembly = PTR_Assembly(TO_TADDR(assembly)); if (modules) { if (pAssembly->GetModule() && count > 0) modules[0] = HOST_CDADDR(pAssembly->GetModule()); } if (pNeeded) *pNeeded = 1; SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetGCHeapDetails(CLRDATA_ADDRESS heap, struct DacpGcHeapDetails *details) { if (heap == 0 || details == NULL) return E_INVALIDARG; SOSDacEnter(); // doesn't make sense to call this on WKS mode if (!GCHeapUtilities::IsServerHeap()) hr = E_INVALIDARG; else #ifdef FEATURE_SVR_GC hr = ServerGCHeapDetails(heap, details); #else hr = E_NOTIMPL; #endif SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetGCHeapStaticData(struct DacpGcHeapDetails *detailsData) { // Make sure ClrDataAccess::ServerGCHeapDetails() is updated as well. 
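    // Static (workstation) flavor of the heap details: the values below are read
    // directly from the DAC GC globals (g_gcDacGlobals) rather than from a per-heap
    // object.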
if (detailsData == NULL) { return E_INVALIDARG; } SOSDacEnter(); detailsData->heapAddr = NULL; detailsData->lowest_address = PTR_CDADDR(g_lowest_address); detailsData->highest_address = PTR_CDADDR(g_highest_address); detailsData->current_c_gc_state = (CLRDATA_ADDRESS)*g_gcDacGlobals->current_c_gc_state; detailsData->alloc_allocated = (CLRDATA_ADDRESS)*g_gcDacGlobals->alloc_allocated; detailsData->ephemeral_heap_segment = (CLRDATA_ADDRESS)*g_gcDacGlobals->ephemeral_heap_segment; detailsData->card_table = PTR_CDADDR(g_card_table); detailsData->mark_array = (CLRDATA_ADDRESS)*g_gcDacGlobals->mark_array; detailsData->next_sweep_obj = (CLRDATA_ADDRESS)*g_gcDacGlobals->next_sweep_obj; if (g_gcDacGlobals->saved_sweep_ephemeral_seg != nullptr) { detailsData->saved_sweep_ephemeral_seg = (CLRDATA_ADDRESS)*g_gcDacGlobals->saved_sweep_ephemeral_seg; detailsData->saved_sweep_ephemeral_start = (CLRDATA_ADDRESS)*g_gcDacGlobals->saved_sweep_ephemeral_start; } else { // with regions, we don't have these variables anymore // use special value -1 in saved_sweep_ephemeral_seg to signal the region case detailsData->saved_sweep_ephemeral_seg = (CLRDATA_ADDRESS)-1; detailsData->saved_sweep_ephemeral_start = 0; } detailsData->background_saved_lowest_address = (CLRDATA_ADDRESS)*g_gcDacGlobals->background_saved_lowest_address; detailsData->background_saved_highest_address = (CLRDATA_ADDRESS)*g_gcDacGlobals->background_saved_highest_address; // get bounds for the different generations for (unsigned int i=0; i < DAC_NUMBERGENERATIONS; i++) { dac_generation generation = GenerationTableIndex(g_gcDacGlobals->generation_table, i); detailsData->generation_table[i].start_segment = (CLRDATA_ADDRESS) dac_cast<TADDR>(generation.start_segment); detailsData->generation_table[i].allocation_start = (CLRDATA_ADDRESS) generation.allocation_start; gc_alloc_context alloc_context = generation.allocation_context; detailsData->generation_table[i].allocContextPtr = (CLRDATA_ADDRESS)alloc_context.alloc_ptr; detailsData->generation_table[i].allocContextLimit = (CLRDATA_ADDRESS)alloc_context.alloc_limit; } if (g_gcDacGlobals->finalize_queue.IsValid()) { DPTR(dac_finalize_queue) fq = Dereference(g_gcDacGlobals->finalize_queue); DPTR(uint8_t*) fillPointersTable = dac_cast<TADDR>(fq) + offsetof(dac_finalize_queue, m_FillPointers); for (unsigned int i = 0; i < DAC_NUMBERGENERATIONS + 3; i++) { detailsData->finalization_fill_pointers[i] = (CLRDATA_ADDRESS)*TableIndex(fillPointersTable, i, sizeof(uint8_t*)); } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetHeapSegmentData(CLRDATA_ADDRESS seg, struct DacpHeapSegmentData *heapSegment) { if (seg == 0 || heapSegment == NULL) return E_INVALIDARG; SOSDacEnter(); if (GCHeapUtilities::IsServerHeap()) { #if !defined(FEATURE_SVR_GC) _ASSERTE(0); #else // !defined(FEATURE_SVR_GC) hr = GetServerHeapData(seg, heapSegment); #endif //!defined(FEATURE_SVR_GC) } else { dac_heap_segment *pSegment = __DPtr<dac_heap_segment>(TO_TADDR(seg)); if (!pSegment) { hr = E_INVALIDARG; } else { heapSegment->segmentAddr = seg; heapSegment->allocated = (CLRDATA_ADDRESS)(ULONG_PTR) pSegment->allocated; heapSegment->committed = (CLRDATA_ADDRESS)(ULONG_PTR) pSegment->committed; heapSegment->reserved = (CLRDATA_ADDRESS)(ULONG_PTR) pSegment->reserved; heapSegment->used = (CLRDATA_ADDRESS)(ULONG_PTR) pSegment->used; heapSegment->mem = (CLRDATA_ADDRESS)(ULONG_PTR) pSegment->mem; heapSegment->next = (CLRDATA_ADDRESS)dac_cast<TADDR>(pSegment->next); heapSegment->flags = pSegment->flags; heapSegment->gc_heap = NULL; 
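                // Workstation GC has a single heap, so there is no owning gc_heap
                // pointer to report for this segment.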
heapSegment->background_allocated = (CLRDATA_ADDRESS)(ULONG_PTR)pSegment->background_allocated; } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetGCHeapList(unsigned int count, CLRDATA_ADDRESS heaps[], unsigned int *pNeeded) { SOSDacEnter(); // make sure we called this in appropriate circumstances (i.e., we have multiple heaps) if (GCHeapUtilities::IsServerHeap()) { #if !defined(FEATURE_SVR_GC) _ASSERTE(0); #else // !defined(FEATURE_SVR_GC) unsigned int heapCount = GCHeapCount(); if (pNeeded) *pNeeded = heapCount; if (heaps) { // get the heap locations if (count == heapCount) hr = GetServerHeaps(heaps, m_pTarget); else hr = E_INVALIDARG; } #endif // !defined(FEATURE_SVR_GC) } else { hr = E_FAIL; // doesn't make sense to call this on WKS mode } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetGCHeapData(struct DacpGcHeapData *gcheapData) { if (gcheapData == NULL) return E_INVALIDARG; SOSDacEnter(); // we need to check and see if g_heap_type // is GC_HEAP_INVALID, in which case we fail. ULONG32 gcHeapValue = g_heap_type; // GC_HEAP_TYPE has three possible values: // GC_HEAP_INVALID = 0, // GC_HEAP_WKS = 1, // GC_HEAP_SVR = 2 // If we get something other than that, we probably read the wrong location. _ASSERTE(gcHeapValue >= GC_HEAP_INVALID && gcHeapValue <= GC_HEAP_SVR); // we have GC_HEAP_INVALID if gcHeapValue == 0, so we're done - we haven't // initialized the heap yet. if (gcHeapValue == GC_HEAP_INVALID) { hr = E_FAIL; goto cleanup; } // Now we can get other important information about the heap // We can use GCHeapUtilities::IsServerHeap here because we have already validated // that the heap is in a valid state. We couldn't use it above, because IsServerHeap // asserts if the heap type is GC_HEAP_INVALID. gcheapData->g_max_generation = *g_gcDacGlobals->max_gen; gcheapData->bServerMode = GCHeapUtilities::IsServerHeap(); gcheapData->bGcStructuresValid = *g_gcDacGlobals->gc_structures_invalid_cnt == 0; if (GCHeapUtilities::IsServerHeap()) { #if !defined (FEATURE_SVR_GC) _ASSERTE(0); gcheapData->HeapCount = 1; #else // !defined (FEATURE_SVR_GC) gcheapData->HeapCount = GCHeapCount(); #endif // !defined (FEATURE_SVR_GC) } else { gcheapData->HeapCount = 1; } cleanup: ; SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetOOMStaticData(struct DacpOomData *oomData) { if (oomData == NULL) return E_INVALIDARG; SOSDacEnter(); memset(oomData, 0, sizeof(DacpOomData)); if (!GCHeapUtilities::IsServerHeap()) { oom_history* pOOMInfo = g_gcDacGlobals->oom_info; oomData->reason = pOOMInfo->reason; oomData->alloc_size = pOOMInfo->alloc_size; oomData->available_pagefile_mb = pOOMInfo->available_pagefile_mb; oomData->gc_index = pOOMInfo->gc_index; oomData->fgm = pOOMInfo->fgm; oomData->size = pOOMInfo->size; oomData->loh_p = pOOMInfo->loh_p; } else { hr = E_FAIL; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetOOMData(CLRDATA_ADDRESS oomAddr, struct DacpOomData *data) { if (oomAddr == 0 || data == NULL) return E_INVALIDARG; SOSDacEnter(); memset(data, 0, sizeof(DacpOomData)); if (!GCHeapUtilities::IsServerHeap()) hr = E_FAIL; // doesn't make sense to call this on WKS mode #ifdef FEATURE_SVR_GC else hr = ServerOomData(oomAddr, data); #else _ASSERTE_MSG(false, "IsServerHeap returned true but FEATURE_SVR_GC not defined"); hr = E_NOTIMPL; #endif //FEATURE_SVR_GC SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetGCGlobalMechanisms(size_t* globalMechanisms) { #ifdef GC_CONFIG_DRIVEN if (globalMechanisms == NULL) return E_INVALIDARG; SOSDacEnter(); memset(globalMechanisms, 0, 
(sizeof(size_t) * MAX_GLOBAL_GC_MECHANISMS_COUNT)); for (int i = 0; i < MAX_GLOBAL_GC_MECHANISMS_COUNT; i++) { globalMechanisms[i] = g_gcDacGlobals->gc_global_mechanisms[i]; } SOSDacLeave(); return hr; #else return E_NOTIMPL; #endif //GC_CONFIG_DRIVEN } HRESULT ClrDataAccess::GetGCInterestingInfoStaticData(struct DacpGCInterestingInfoData *data) { #ifdef GC_CONFIG_DRIVEN if (data == NULL) return E_INVALIDARG; static_assert_no_msg(DAC_NUMBERGENERATIONS == NUMBERGENERATIONS); static_assert_no_msg(DAC_NUM_GC_DATA_POINTS == NUM_GC_DATA_POINTS); static_assert_no_msg(DAC_MAX_COMPACT_REASONS_COUNT == MAX_COMPACT_REASONS_COUNT); static_assert_no_msg(DAC_MAX_EXPAND_MECHANISMS_COUNT == MAX_EXPAND_MECHANISMS_COUNT); static_assert_no_msg(DAC_MAX_GC_MECHANISM_BITS_COUNT == MAX_GC_MECHANISM_BITS_COUNT); SOSDacEnter(); memset(data, 0, sizeof(DacpGCInterestingInfoData)); if (g_heap_type != GC_HEAP_SVR) { for (int i = 0; i < NUM_GC_DATA_POINTS; i++) data->interestingDataPoints[i] = g_gcDacGlobals->interesting_data_per_heap[i]; for (int i = 0; i < MAX_COMPACT_REASONS_COUNT; i++) data->compactReasons[i] = g_gcDacGlobals->compact_reasons_per_heap[i]; for (int i = 0; i < MAX_EXPAND_MECHANISMS_COUNT; i++) data->expandMechanisms[i] = g_gcDacGlobals->expand_mechanisms_per_heap[i]; for (int i = 0; i < MAX_GC_MECHANISM_BITS_COUNT; i++) data->bitMechanisms[i] = g_gcDacGlobals->interesting_mechanism_bits_per_heap[i]; } else { hr = E_FAIL; } SOSDacLeave(); return hr; #else return E_NOTIMPL; #endif //GC_CONFIG_DRIVEN } HRESULT ClrDataAccess::GetGCInterestingInfoData(CLRDATA_ADDRESS interestingInfoAddr, struct DacpGCInterestingInfoData *data) { #ifdef GC_CONFIG_DRIVEN if (interestingInfoAddr == 0 || data == NULL) return E_INVALIDARG; SOSDacEnter(); memset(data, 0, sizeof(DacpGCInterestingInfoData)); if (!GCHeapUtilities::IsServerHeap()) hr = E_FAIL; // doesn't make sense to call this on WKS mode #ifdef FEATURE_SVR_GC else hr = ServerGCInterestingInfoData(interestingInfoAddr, data); #else _ASSERTE_MSG(false, "IsServerHeap returned true but FEATURE_SVR_GC not defined"); hr = E_NOTIMPL; #endif //FEATURE_SVR_GC SOSDacLeave(); return hr; #else return E_NOTIMPL; #endif //GC_CONFIG_DRIVEN } HRESULT ClrDataAccess::GetHeapAnalyzeData(CLRDATA_ADDRESS addr, struct DacpGcHeapAnalyzeData *data) { if (addr == 0 || data == NULL) return E_INVALIDARG; SOSDacEnter(); if (!GCHeapUtilities::IsServerHeap()) hr = E_FAIL; // doesn't make sense to call this on WKS mode #ifdef FEATURE_SVR_GC else hr = ServerGCHeapAnalyzeData(addr, data); #else _ASSERTE_MSG(false, "IsServerHeap returned true but FEATURE_SVR_GC not defined"); hr = E_NOTIMPL; #endif //FEATURE_SVR_GC SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetHeapAnalyzeStaticData(struct DacpGcHeapAnalyzeData *analyzeData) { if (analyzeData == NULL) return E_INVALIDARG; SOSDacEnter(); analyzeData->internal_root_array = dac_cast<TADDR>(g_gcDacGlobals->internal_root_array); analyzeData->internal_root_array_index = *g_gcDacGlobals->internal_root_array_index; analyzeData->heap_analyze_success = *g_gcDacGlobals->heap_analyze_success; SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetUsefulGlobals(struct DacpUsefulGlobalsData *globalsData) { if (globalsData == NULL) return E_INVALIDARG; SOSDacEnter(); TypeHandle objArray = g_pPredefinedArrayTypes[ELEMENT_TYPE_OBJECT]; if (objArray != NULL) globalsData->ArrayMethodTable = HOST_CDADDR(objArray.AsMethodTable()); else globalsData->ArrayMethodTable = 0; globalsData->StringMethodTable = HOST_CDADDR(g_pStringClass); globalsData->ObjectMethodTable 
= HOST_CDADDR(g_pObjectClass); globalsData->ExceptionMethodTable = HOST_CDADDR(g_pExceptionClass); globalsData->FreeMethodTable = HOST_CDADDR(g_pFreeObjectMethodTable); SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetNestedExceptionData(CLRDATA_ADDRESS exception, CLRDATA_ADDRESS *exceptionObject, CLRDATA_ADDRESS *nextNestedException) { if (exception == 0 || exceptionObject == NULL || nextNestedException == NULL) return E_INVALIDARG; SOSDacEnter(); #ifdef FEATURE_EH_FUNCLETS ExceptionTracker *pExData = PTR_ExceptionTracker(TO_TADDR(exception)); #else ExInfo *pExData = PTR_ExInfo(TO_TADDR(exception)); #endif // FEATURE_EH_FUNCLETS if (!pExData) { hr = E_INVALIDARG; } else { *exceptionObject = TO_CDADDR(*PTR_TADDR(pExData->m_hThrowable)); *nextNestedException = PTR_HOST_TO_TADDR(pExData->m_pPrevNestedInfo); } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetDomainLocalModuleData(CLRDATA_ADDRESS addr, struct DacpDomainLocalModuleData *pLocalModuleData) { if (addr == 0 || pLocalModuleData == NULL) return E_INVALIDARG; SOSDacEnter(); DomainLocalModule* pLocalModule = PTR_DomainLocalModule(TO_TADDR(addr)); pLocalModuleData->pGCStaticDataStart = TO_CDADDR(PTR_TO_TADDR(pLocalModule->GetPrecomputedGCStaticsBasePointer())); pLocalModuleData->pNonGCStaticDataStart = TO_CDADDR(pLocalModule->GetPrecomputedNonGCStaticsBasePointer()); pLocalModuleData->pDynamicClassTable = PTR_CDADDR(pLocalModule->m_pDynamicClassTable.Load()); pLocalModuleData->pClassData = (TADDR) (PTR_HOST_MEMBER_TADDR(DomainLocalModule, pLocalModule, m_pDataBlob)); SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetDomainLocalModuleDataFromModule(CLRDATA_ADDRESS addr, struct DacpDomainLocalModuleData *pLocalModuleData) { if (addr == 0 || pLocalModuleData == NULL) return E_INVALIDARG; SOSDacEnter(); Module* pModule = PTR_Module(TO_TADDR(addr)); DomainLocalModule* pLocalModule = PTR_DomainLocalModule(pModule->GetDomainLocalModule()); if (!pLocalModule) { hr = E_INVALIDARG; } else { pLocalModuleData->pGCStaticDataStart = TO_CDADDR(PTR_TO_TADDR(pLocalModule->GetPrecomputedGCStaticsBasePointer())); pLocalModuleData->pNonGCStaticDataStart = TO_CDADDR(pLocalModule->GetPrecomputedNonGCStaticsBasePointer()); pLocalModuleData->pDynamicClassTable = PTR_CDADDR(pLocalModule->m_pDynamicClassTable.Load()); pLocalModuleData->pClassData = (TADDR) (PTR_HOST_MEMBER_TADDR(DomainLocalModule, pLocalModule, m_pDataBlob)); } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetDomainLocalModuleDataFromAppDomain(CLRDATA_ADDRESS appDomainAddr, int moduleID, struct DacpDomainLocalModuleData *pLocalModuleData) { // CoreCLR does not support multi-appdomain shared assembly loading. Thus, a non-pointer sized moduleID cannot exist. 
return E_INVALIDARG; } HRESULT ClrDataAccess::GetThreadLocalModuleData(CLRDATA_ADDRESS thread, unsigned int index, struct DacpThreadLocalModuleData *pLocalModuleData) { if (pLocalModuleData == NULL) return E_INVALIDARG; SOSDacEnter(); pLocalModuleData->threadAddr = thread; pLocalModuleData->ModuleIndex = index; PTR_Thread pThread = PTR_Thread(TO_TADDR(thread)); PTR_ThreadLocalBlock pLocalBlock = ThreadStatics::GetCurrentTLB(pThread); PTR_ThreadLocalModule pLocalModule = pLocalBlock->GetTLMIfExists(ModuleIndex(index)); if (!pLocalModule) { hr = E_INVALIDARG; } else { pLocalModuleData->pGCStaticDataStart = TO_CDADDR(PTR_TO_TADDR(pLocalModule->GetPrecomputedGCStaticsBasePointer())); pLocalModuleData->pNonGCStaticDataStart = TO_CDADDR(pLocalModule->GetPrecomputedNonGCStaticsBasePointer()); pLocalModuleData->pDynamicClassTable = PTR_CDADDR(pLocalModule->m_pDynamicClassTable); pLocalModuleData->pClassData = (TADDR) (PTR_HOST_MEMBER_TADDR(ThreadLocalModule, pLocalModule, m_pDataBlob)); } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetHandleEnum(ISOSHandleEnum **ppHandleEnum) { unsigned int types[] = {HNDTYPE_WEAK_SHORT, HNDTYPE_WEAK_LONG, HNDTYPE_STRONG, HNDTYPE_PINNED, HNDTYPE_VARIABLE, HNDTYPE_DEPENDENT, HNDTYPE_ASYNCPINNED, HNDTYPE_SIZEDREF, #if defined(FEATURE_COMINTEROP) || defined(FEATURE_COMWRAPPERS) || defined(FEATURE_OBJCMARSHAL) HNDTYPE_REFCOUNTED, #endif // FEATURE_COMINTEROP || FEATURE_COMWRAPPERS || FEATURE_OBJCMARSHAL #if defined(FEATURE_COMINTEROP) || defined(FEATURE_COMWRAPPERS) HNDTYPE_WEAK_NATIVE_COM #endif // FEATURE_COMINTEROP }; return GetHandleEnumForTypes(types, ARRAY_SIZE(types), ppHandleEnum); } HRESULT ClrDataAccess::GetHandleEnumForTypes(unsigned int types[], unsigned int count, ISOSHandleEnum **ppHandleEnum) { if (ppHandleEnum == 0) return E_POINTER; SOSDacEnter(); DacHandleWalker *walker = new DacHandleWalker(); HRESULT hr = walker->Init(this, types, count); if (SUCCEEDED(hr)) hr = walker->QueryInterface(__uuidof(ISOSHandleEnum), (void**)ppHandleEnum); if (FAILED(hr)) delete walker; SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetHandleEnumForGC(unsigned int gen, ISOSHandleEnum **ppHandleEnum) { if (ppHandleEnum == 0) return E_POINTER; SOSDacEnter(); unsigned int types[] = {HNDTYPE_WEAK_SHORT, HNDTYPE_WEAK_LONG, HNDTYPE_STRONG, HNDTYPE_PINNED, HNDTYPE_VARIABLE, HNDTYPE_DEPENDENT, HNDTYPE_ASYNCPINNED, HNDTYPE_SIZEDREF, #if defined(FEATURE_COMINTEROP) || defined(FEATURE_COMWRAPPERS) || defined(FEATURE_OBJCMARSHAL) HNDTYPE_REFCOUNTED, #endif // FEATURE_COMINTEROP || FEATURE_COMWRAPPERS || FEATURE_OBJCMARSHAL #if defined(FEATURE_COMINTEROP) || defined(FEATURE_COMWRAPPERS) HNDTYPE_WEAK_NATIVE_COM #endif // FEATURE_COMINTEROP || FEATURE_COMWRAPPERS }; DacHandleWalker *walker = new DacHandleWalker(); HRESULT hr = walker->Init(this, types, ARRAY_SIZE(types), gen); if (SUCCEEDED(hr)) hr = walker->QueryInterface(__uuidof(ISOSHandleEnum), (void**)ppHandleEnum); if (FAILED(hr)) delete walker; SOSDacLeave(); return hr; } HRESULT ClrDataAccess::TraverseEHInfo(CLRDATA_ADDRESS ip, DUMPEHINFO pFunc, LPVOID token) { if (ip == 0 || pFunc == NULL) return E_INVALIDARG; SOSDacEnter(); EECodeInfo codeInfo(TO_TADDR(ip)); if (!codeInfo.IsValid()) { hr = E_INVALIDARG; } if (SUCCEEDED(hr)) { EH_CLAUSE_ENUMERATOR EnumState; EE_ILEXCEPTION_CLAUSE EHClause; unsigned EHCount; EHCount = codeInfo.GetJitManager()->InitializeEHEnumeration(codeInfo.GetMethodToken(), &EnumState); for (unsigned i = 0; i < EHCount; i++) { codeInfo.GetJitManager()->GetNextEHClause(&EnumState, &EHClause); 
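            // Translate the runtime's EE_ILEXCEPTION_CLAUSE into the DAC-exposed
            // DACEHInfo shape before handing it to the caller's callback.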
DACEHInfo deh; ZeroMemory(&deh,sizeof(deh)); if (IsFault(&EHClause)) { deh.clauseType = EHFault; } else if (IsFinally(&EHClause)) { deh.clauseType = EHFinally; } else if (IsFilterHandler(&EHClause)) { deh.clauseType = EHFilter; deh.filterOffset = EHClause.FilterOffset; } else if (IsTypedHandler(&EHClause)) { deh.clauseType = EHTyped; deh.isCatchAllHandler = (&EHClause.TypeHandle == (void*)(size_t)mdTypeRefNil); } else { deh.clauseType = EHUnknown; } if (HasCachedTypeHandle(&EHClause)) { deh.mtCatch = TO_CDADDR(&EHClause.TypeHandle); } else if(!IsFaultOrFinally(&EHClause)) { // the module of the token (whether a ref or def token) is the same as the module of the method containing the EH clause deh.moduleAddr = HOST_CDADDR(codeInfo.GetMethodDesc()->GetModule()); deh.tokCatch = EHClause.ClassToken; } deh.tryStartOffset = EHClause.TryStartPC; deh.tryEndOffset = EHClause.TryEndPC; deh.handlerStartOffset = EHClause.HandlerStartPC; deh.handlerEndOffset = EHClause.HandlerEndPC; deh.isDuplicateClause = IsDuplicateClause(&EHClause); if (!(pFunc)(i, EHCount, &deh, token)) { // User wants to stop the enumeration hr = E_ABORT; break; } } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::TraverseRCWCleanupList(CLRDATA_ADDRESS cleanupListPtr, VISITRCWFORCLEANUP pFunc, LPVOID token) { #ifdef FEATURE_COMINTEROP if (pFunc == 0) return E_INVALIDARG; SOSDacEnter(); RCWCleanupList *pList = g_pRCWCleanupList; if (cleanupListPtr) { pList = PTR_RCWCleanupList(TO_TADDR(cleanupListPtr)); } if (pList) { PTR_RCW pBucket = dac_cast<PTR_RCW>(TO_TADDR(pList->m_pFirstBucket)); while (pBucket != NULL) { PTR_RCW pRCW = pBucket; Thread *pSTAThread = pRCW->GetSTAThread(); LPVOID pCtxCookie = pRCW->GetWrapperCtxCookie(); BOOL bIsFreeThreaded = pRCW->IsFreeThreaded(); while (pRCW) { (pFunc)(HOST_CDADDR(pRCW),(CLRDATA_ADDRESS)pCtxCookie, (CLRDATA_ADDRESS)(TADDR)pSTAThread, bIsFreeThreaded, token); pRCW = pRCW->m_pNextRCW; } pBucket = pBucket->m_pNextCleanupBucket; } } SOSDacLeave(); return hr; #else return E_NOTIMPL; #endif // FEATURE_COMINTEROP } HRESULT ClrDataAccess::TraverseLoaderHeap(CLRDATA_ADDRESS loaderHeapAddr, VISITHEAP pFunc) { if (loaderHeapAddr == 0 || pFunc == 0) return E_INVALIDARG; SOSDacEnter(); LoaderHeap *pLoaderHeap = PTR_LoaderHeap(TO_TADDR(loaderHeapAddr)); PTR_LoaderHeapBlock block = pLoaderHeap->m_pFirstBlock; while (block.IsValid()) { TADDR addr = PTR_TO_TADDR(block->pVirtualAddress); size_t size = block->dwVirtualSize; BOOL bCurrentBlock = (block == pLoaderHeap->m_pFirstBlock); pFunc(addr,size,bCurrentBlock); block = block->pNext; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::TraverseVirtCallStubHeap(CLRDATA_ADDRESS pAppDomain, VCSHeapType heaptype, VISITHEAP pFunc) { if (pAppDomain == 0) return E_INVALIDARG; SOSDacEnter(); BaseDomain* pBaseDomain = PTR_BaseDomain(TO_TADDR(pAppDomain)); VirtualCallStubManager *pVcsMgr = PTR_VirtualCallStubManager((TADDR)pBaseDomain->GetLoaderAllocator()->GetVirtualCallStubManager()); if (!pVcsMgr) { hr = E_POINTER; } else { LoaderHeap *pLoaderHeap = NULL; switch(heaptype) { case IndcellHeap: pLoaderHeap = pVcsMgr->indcell_heap; break; case LookupHeap: pLoaderHeap = pVcsMgr->lookup_heap; break; case ResolveHeap: pLoaderHeap = pVcsMgr->resolve_heap; break; case DispatchHeap: pLoaderHeap = pVcsMgr->dispatch_heap; break; case CacheEntryHeap: pLoaderHeap = pVcsMgr->cache_entry_heap; break; default: hr = E_INVALIDARG; } if (SUCCEEDED(hr)) { PTR_LoaderHeapBlock block = pLoaderHeap->m_pFirstBlock; while (block.IsValid()) { TADDR addr = 
PTR_TO_TADDR(block->pVirtualAddress); size_t size = block->dwVirtualSize; BOOL bCurrentBlock = (block == pLoaderHeap->m_pFirstBlock); pFunc(addr, size, bCurrentBlock); block = block->pNext; } } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetSyncBlockData(unsigned int SBNumber, struct DacpSyncBlockData *pSyncBlockData) { if (pSyncBlockData == NULL) return E_INVALIDARG; SOSDacEnter(); ZeroMemory(pSyncBlockData,sizeof(DacpSyncBlockData)); pSyncBlockData->SyncBlockCount = (SyncBlockCache::s_pSyncBlockCache->m_FreeSyncTableIndex) - 1; PTR_SyncTableEntry ste = PTR_SyncTableEntry(dac_cast<TADDR>(g_pSyncTable)+(sizeof(SyncTableEntry) * SBNumber)); pSyncBlockData->bFree = ((dac_cast<TADDR>(ste->m_Object.Load())) & 1); if (pSyncBlockData->bFree == FALSE) { pSyncBlockData->Object = (CLRDATA_ADDRESS)dac_cast<TADDR>(ste->m_Object.Load()); if (ste->m_SyncBlock != NULL) { SyncBlock *pBlock = PTR_SyncBlock(ste->m_SyncBlock); pSyncBlockData->SyncBlockPointer = HOST_CDADDR(pBlock); #ifdef FEATURE_COMINTEROP if (pBlock->m_pInteropInfo) { pSyncBlockData->COMFlags |= (pBlock->m_pInteropInfo->DacGetRawRCW() != 0) ? SYNCBLOCKDATA_COMFLAGS_RCW : 0; pSyncBlockData->COMFlags |= (pBlock->m_pInteropInfo->GetCCW() != NULL) ? SYNCBLOCKDATA_COMFLAGS_CCW : 0; #ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION pSyncBlockData->COMFlags |= (pBlock->m_pInteropInfo->GetComClassFactory() != NULL) ? SYNCBLOCKDATA_COMFLAGS_CF : 0; #endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION } #endif // FEATURE_COMINTEROP pSyncBlockData->MonitorHeld = pBlock->m_Monitor.GetMonitorHeldStateVolatile(); pSyncBlockData->Recursion = pBlock->m_Monitor.GetRecursionLevel(); pSyncBlockData->HoldingThread = HOST_CDADDR(pBlock->m_Monitor.GetHoldingThread()); pSyncBlockData->appDomainPtr = PTR_HOST_TO_TADDR(AppDomain::GetCurrentDomain()); // TODO: Microsoft, implement the wait list pSyncBlockData->AdditionalThreadCount = 0; if (pBlock->m_Link.m_pNext != NULL) { PTR_SLink pLink = pBlock->m_Link.m_pNext; do { pSyncBlockData->AdditionalThreadCount++; pLink = pBlock->m_Link.m_pNext; } while ((pLink != NULL) && (pSyncBlockData->AdditionalThreadCount < 1000)); } } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetSyncBlockCleanupData(CLRDATA_ADDRESS syncBlock, struct DacpSyncBlockCleanupData *syncBlockCData) { if (syncBlock == 0 || syncBlockCData == NULL) return E_INVALIDARG; SOSDacEnter(); ZeroMemory (syncBlockCData, sizeof(DacpSyncBlockCleanupData)); SyncBlock *pBlock = NULL; if (syncBlock == NULL && SyncBlockCache::s_pSyncBlockCache->m_pCleanupBlockList) { pBlock = (SyncBlock *) PTR_SyncBlock( PTR_HOST_TO_TADDR(SyncBlockCache::s_pSyncBlockCache->m_pCleanupBlockList) - offsetof(SyncBlock, m_Link)); } else { pBlock = PTR_SyncBlock(TO_TADDR(syncBlock)); } if (pBlock) { syncBlockCData->SyncBlockPointer = HOST_CDADDR(pBlock); if (pBlock->m_Link.m_pNext) { syncBlockCData->nextSyncBlock = (CLRDATA_ADDRESS) (PTR_HOST_TO_TADDR(pBlock->m_Link.m_pNext) - offsetof(SyncBlock, m_Link)); } #ifdef FEATURE_COMINTEROP if (pBlock->m_pInteropInfo->DacGetRawRCW()) syncBlockCData->blockRCW = (CLRDATA_ADDRESS) pBlock->m_pInteropInfo->DacGetRawRCW(); #ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION if (pBlock->m_pInteropInfo->GetComClassFactory()) syncBlockCData->blockClassFactory = (CLRDATA_ADDRESS) (TADDR) pBlock->m_pInteropInfo->GetComClassFactory(); #endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION if (pBlock->m_pInteropInfo->GetCCW()) syncBlockCData->blockCCW = (CLRDATA_ADDRESS) dac_cast<TADDR>(pBlock->m_pInteropInfo->GetCCW()); #endif // FEATURE_COMINTEROP } 
SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetJitHelperFunctionName(CLRDATA_ADDRESS ip, unsigned int count, _Inout_updates_z_(count) char *name, unsigned int *pNeeded) { SOSDacEnter(); PCSTR pszHelperName = GetJitHelperName(TO_TADDR(ip)); if (pszHelperName == NULL) { hr = E_INVALIDARG; } else { unsigned int len = (unsigned int)strlen(pszHelperName) + 1; if (pNeeded) *pNeeded = len; if (name) { if (count < len) hr = E_FAIL; else strcpy_s(name, count, pszHelperName); } } SOSDacLeave(); return hr; }; HRESULT ClrDataAccess::GetJumpThunkTarget(T_CONTEXT *ctx, CLRDATA_ADDRESS *targetIP, CLRDATA_ADDRESS *targetMD) { if (ctx == NULL || targetIP == NULL || targetMD == NULL) return E_INVALIDARG; #ifdef TARGET_AMD64 SOSDacEnter(); if (!GetAnyThunkTarget(ctx, targetIP, targetMD)) hr = E_FAIL; SOSDacLeave(); return hr; #else return E_FAIL; #endif // TARGET_AMD64 } #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable:21000) // Suppress PREFast warning about overly large function #endif STDMETHODIMP ClrDataAccess::Request(IN ULONG32 reqCode, IN ULONG32 inBufferSize, IN BYTE* inBuffer, IN ULONG32 outBufferSize, OUT BYTE* outBuffer) { HRESULT status; DAC_ENTER(); EX_TRY { switch(reqCode) { case CLRDATA_REQUEST_REVISION: if (inBufferSize != 0 || inBuffer || outBufferSize != sizeof(ULONG32)) { status = E_INVALIDARG; } else { *(ULONG32*)outBuffer = 9; status = S_OK; } break; default: status = E_INVALIDARG; break; } } EX_CATCH { if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; } } EX_END_CATCH(SwallowAllExceptions) DAC_LEAVE(); return status; } #ifdef _PREFAST_ #pragma warning(pop) #endif void ClrDataAccess::EnumWksGlobalMemoryRegions(CLRDataEnumMemoryFlags flags) { SUPPORTS_DAC; #ifdef FEATURE_SVR_GC // If server GC, skip enumeration if (g_gcDacGlobals->g_heaps != nullptr) return; #endif Dereference(g_gcDacGlobals->ephemeral_heap_segment).EnumMem(); g_gcDacGlobals->alloc_allocated.EnumMem(); g_gcDacGlobals->gc_structures_invalid_cnt.EnumMem(); Dereference(g_gcDacGlobals->finalize_queue).EnumMem(); // Enumerate the entire generation table, which has variable size EnumGenerationTable(dac_cast<TADDR>(g_gcDacGlobals->generation_table)); if (g_gcDacGlobals->generation_table.IsValid()) { ULONG first = IsRegionGCEnabled() ? 
        0 : (*g_gcDacGlobals->max_gen);
        // enumerating the first to max + 2 gives you
        // the segment list for all the normal segments plus the pinned heap segment (max + 2)
        // this is the convention in the GC so it is repeated here
        for (ULONG i = first; i <= *g_gcDacGlobals->max_gen + 2; i++)
        {
            dac_generation gen = GenerationTableIndex(g_gcDacGlobals->generation_table, i);
            __DPtr<dac_heap_segment> seg = dac_cast<TADDR>(gen.start_segment);
            while (seg)
            {
                DacEnumMemoryRegion(dac_cast<TADDR>(seg), sizeof(dac_heap_segment));
                seg = seg->next;
            }
        }
    }
}

HRESULT ClrDataAccess::GetClrWatsonBuckets(CLRDATA_ADDRESS thread, void *pGenericModeBlock)
{
#ifdef TARGET_UNIX
    // This API is not available under TARGET_UNIX
    return E_FAIL;
#else // TARGET_UNIX
    if (thread == 0 || pGenericModeBlock == NULL)
        return E_INVALIDARG;

    SOSDacEnter();

    Thread * pThread = PTR_Thread(TO_TADDR(thread));
    hr = GetClrWatsonBucketsWorker(pThread, reinterpret_cast<GenericModeBlock *>(pGenericModeBlock));

    SOSDacLeave();
    return hr;
#endif // TARGET_UNIX
}

#ifndef TARGET_UNIX

HRESULT ClrDataAccess::GetClrWatsonBucketsWorker(Thread * pThread, GenericModeBlock * pGM)
{
    if ((pThread == NULL) || (pGM == NULL))
    {
        return E_INVALIDARG;
    }

    // By default, there are no buckets
    PTR_VOID pBuckets = NULL;

    // Get the handle to the throwable
    OBJECTHANDLE ohThrowable = pThread->GetThrowableAsHandle();
    if (ohThrowable != NULL)
    {
        // Get the object from handle and check if the throwable is preallocated or not
        OBJECTREF oThrowable = ObjectFromHandle(ohThrowable);
        if (oThrowable != NULL)
        {
            // Does the throwable have buckets?
            U1ARRAYREF refWatsonBucketArray = ((EXCEPTIONREF)oThrowable)->GetWatsonBucketReference();
            if (refWatsonBucketArray != NULL)
            {
                // Get the watson buckets from the throwable for non-preallocated
                // exceptions
                pBuckets = dac_cast<PTR_VOID>(refWatsonBucketArray->GetDataPtr());
            }
            else
            {
                // This is a preallocated exception object - check if the UE Watson bucket tracker
                // has any bucket details
                pBuckets = pThread->GetExceptionState()->GetUEWatsonBucketTracker()->RetrieveWatsonBuckets();
                if (pBuckets == NULL)
                {
                    // Since the UE watson bucket tracker does not have them, look up the current
                    // exception tracker
                    if (pThread->GetExceptionState()->GetCurrentExceptionTracker() != NULL)
                    {
                        pBuckets = pThread->GetExceptionState()->GetCurrentExceptionTracker()->GetWatsonBucketTracker()->RetrieveWatsonBuckets();
                    }
                }
            }
        }
    }
    else
    {
        // Debugger.Break doesn't have a throwable, but saves Watson buckets in EHWatsonBucketTracker.
        pBuckets = pThread->GetExceptionState()->GetUEWatsonBucketTracker()->RetrieveWatsonBuckets();
    }

    // If pBuckets is non-null, it is the address of a Watson GenericModeBlock in the target process.
if (pBuckets != NULL) { ULONG32 returned = 0; HRESULT hr = m_pTarget->ReadVirtual(dac_cast<TADDR>(pBuckets), reinterpret_cast<BYTE *>(pGM), sizeof(*pGM), &returned); if (FAILED(hr)) { hr = CORDBG_E_READVIRTUAL_FAILURE; } if (SUCCEEDED(hr) && (returned != sizeof(*pGM))) { hr = HRESULT_FROM_WIN32(ERROR_PARTIAL_COPY); } return hr; } else { // Buckets are not available return S_FALSE; } } #endif // TARGET_UNIX HRESULT ClrDataAccess::GetTLSIndex(ULONG *pIndex) { if (pIndex == NULL) return E_INVALIDARG; SOSDacEnter(); if (g_TlsIndex == TLS_OUT_OF_INDEXES) { *pIndex = 0; hr = S_FALSE; } else { *pIndex = g_TlsIndex; } SOSDacLeave(); return hr; } #ifndef TARGET_UNIX extern "C" IMAGE_DOS_HEADER __ImageBase; #endif HRESULT ClrDataAccess::GetDacModuleHandle(HMODULE *phModule) { if(phModule == NULL) return E_INVALIDARG; #ifndef TARGET_UNIX *phModule = (HMODULE)&__ImageBase; return S_OK; #else // hModule is not available under TARGET_UNIX return E_FAIL; #endif } HRESULT ClrDataAccess::GetRCWData(CLRDATA_ADDRESS addr, struct DacpRCWData *rcwData) { if (addr == 0 || rcwData == NULL) return E_INVALIDARG; #ifdef FEATURE_COMINTEROP SOSDacEnter(); ZeroMemory (rcwData, sizeof(DacpRCWData)); PTR_RCW pRCW = dac_cast<PTR_RCW>(CLRDATA_ADDRESS_TO_TADDR(addr)); rcwData->identityPointer = TO_CDADDR(pRCW->m_pIdentity); rcwData->unknownPointer = TO_CDADDR(pRCW->GetRawIUnknown_NoAddRef()); rcwData->vtablePtr = TO_CDADDR(pRCW->m_vtablePtr); rcwData->creatorThread = TO_CDADDR(pRCW->m_pCreatorThread); rcwData->ctxCookie = TO_CDADDR(pRCW->GetWrapperCtxCookie()); rcwData->refCount = pRCW->m_cbRefCount; rcwData->isAggregated = pRCW->IsURTAggregated(); rcwData->isContained = pRCW->IsURTContained(); rcwData->isFreeThreaded = pRCW->IsFreeThreaded(); rcwData->isDisconnected = pRCW->IsDisconnected(); if (pRCW->m_SyncBlockIndex != 0) { PTR_SyncTableEntry ste = PTR_SyncTableEntry(dac_cast<TADDR>(g_pSyncTable) + (sizeof(SyncTableEntry) * pRCW->m_SyncBlockIndex)); rcwData->managedObject = PTR_CDADDR(ste->m_Object.Load()); } // count the number of cached interface pointers rcwData->interfaceCount = 0; RCW::CachedInterfaceEntryIterator it = pRCW->IterateCachedInterfacePointers(); while (it.Next()) { if (it.GetEntry()->m_pUnknown.Load() != NULL) rcwData->interfaceCount++; } SOSDacLeave(); return hr; #else return E_NOTIMPL; #endif } HRESULT ClrDataAccess::GetRCWInterfaces(CLRDATA_ADDRESS rcw, unsigned int count, struct DacpCOMInterfacePointerData interfaces[], unsigned int *pNeeded) { if (rcw == 0) return E_INVALIDARG; #ifdef FEATURE_COMINTEROP SOSDacEnter(); PTR_RCW pRCW = dac_cast<PTR_RCW>(CLRDATA_ADDRESS_TO_TADDR(rcw)); if (interfaces == NULL) { if (pNeeded) { unsigned int c = 0; RCW::CachedInterfaceEntryIterator it = pRCW->IterateCachedInterfacePointers(); while (it.Next()) { if (it.GetEntry()->m_pUnknown.Load() != NULL) c++; } *pNeeded = c; } else { hr = E_INVALIDARG; } } else { ZeroMemory(interfaces, sizeof(DacpCOMInterfacePointerData) * count); unsigned int itemIndex = 0; RCW::CachedInterfaceEntryIterator it = pRCW->IterateCachedInterfacePointers(); while (it.Next()) { InterfaceEntry *pEntry = it.GetEntry(); if (pEntry->m_pUnknown.Load() != NULL) { if (itemIndex >= count) { // the outBuffer is too small hr = E_INVALIDARG; break; } else { interfaces[itemIndex].interfacePtr = TO_CDADDR(pEntry->m_pUnknown.Load()); interfaces[itemIndex].methodTable = TO_CDADDR(pEntry->m_pMT.Load()); interfaces[itemIndex].comContext = TO_CDADDR(it.GetCtxCookie()); itemIndex++; } } } if (SUCCEEDED(hr) && pNeeded) *pNeeded = itemIndex; } SOSDacLeave(); 
return hr; #else return E_NOTIMPL; #endif } #ifdef FEATURE_COMINTEROP PTR_ComCallWrapper ClrDataAccess::DACGetCCWFromAddress(CLRDATA_ADDRESS addr) { PTR_ComCallWrapper pCCW = NULL; // first check whether the address is our COM IP TADDR pPtr = CLRDATA_ADDRESS_TO_TADDR(addr); ULONG32 returned = 0; if (m_pTarget->ReadVirtual(pPtr, (PBYTE)&pPtr, sizeof(TADDR), &returned) == S_OK && returned == sizeof(TADDR)) { // this should be the vtable pointer - dereference the 2nd slot if (m_pTarget->ReadVirtual(pPtr + sizeof(PBYTE) * TEAR_OFF_SLOT, (PBYTE)&pPtr, sizeof(TADDR), &returned) == S_OK && returned == sizeof(TADDR)) { #ifdef TARGET_ARM // clear the THUMB bit on pPtr before comparing with known vtable entry pPtr &= ~THUMB_CODE; #endif if (pPtr == GetEEFuncEntryPoint(TEAR_OFF_STANDARD)) { // Points to ComCallWrapper PTR_IUnknown pUnk(CLRDATA_ADDRESS_TO_TADDR(addr)); pCCW = ComCallWrapper::GetWrapperFromIP(pUnk); } else if (pPtr == GetEEFuncEntryPoint(TEAR_OFF_SIMPLE) || pPtr == GetEEFuncEntryPoint(TEAR_OFF_SIMPLE_INNER)) { // Points to SimpleComCallWrapper PTR_IUnknown pUnk(CLRDATA_ADDRESS_TO_TADDR(addr)); pCCW = SimpleComCallWrapper::GetWrapperFromIP(pUnk)->GetMainWrapper(); } } } if (pCCW == NULL) { // no luck interpreting the address as a COM interface pointer - it must be a CCW address pCCW = dac_cast<PTR_ComCallWrapper>(CLRDATA_ADDRESS_TO_TADDR(addr)); } if (pCCW->IsLinked()) pCCW = ComCallWrapper::GetStartWrapper(pCCW); return pCCW; } PTR_IUnknown ClrDataAccess::DACGetCOMIPFromCCW(PTR_ComCallWrapper pCCW, int vtableIndex) { if (pCCW->m_rgpIPtr[vtableIndex] != NULL) { PTR_IUnknown pUnk = dac_cast<PTR_IUnknown>(dac_cast<TADDR>(pCCW) + offsetof(ComCallWrapper, m_rgpIPtr[vtableIndex])); PTR_ComMethodTable pCMT = ComMethodTable::ComMethodTableFromIP(pUnk); if (pCMT->IsLayoutComplete()) { // return only fully laid out vtables return pUnk; } } return NULL; } #endif #ifdef FEATURE_COMWRAPPERS BOOL ClrDataAccess::DACGetComWrappersCCWVTableQIAddress(CLRDATA_ADDRESS ccwPtr, TADDR *vTableAddress, TADDR *qiAddress) { _ASSERTE(vTableAddress != NULL && qiAddress != NULL); HRESULT hr = S_OK; ULONG32 bytesRead = 0; TADDR ccw = CLRDATA_ADDRESS_TO_TADDR(ccwPtr); *vTableAddress = NULL; if (FAILED(m_pTarget->ReadVirtual(ccw, (PBYTE)vTableAddress, sizeof(TADDR), &bytesRead)) || bytesRead != sizeof(TADDR) || vTableAddress == NULL) { return FALSE; } *qiAddress = NULL; if (FAILED(m_pTarget->ReadVirtual(*vTableAddress, (PBYTE)qiAddress, sizeof(TADDR), &bytesRead)) || bytesRead != sizeof(TADDR) || qiAddress == NULL) { return FALSE; } #ifdef TARGET_ARM // clear the THUMB bit on qiAddress before comparing with known vtable entry *qiAddress &= ~THUMB_CODE; #endif return TRUE; } BOOL ClrDataAccess::DACIsComWrappersCCW(CLRDATA_ADDRESS ccwPtr) { TADDR vTableAddress = NULL; TADDR qiAddress = NULL; if (!DACGetComWrappersCCWVTableQIAddress(ccwPtr, &vTableAddress, &qiAddress)) { return FALSE; } return (qiAddress == GetEEFuncEntryPoint(ManagedObjectWrapper_QueryInterface) || qiAddress == GetEEFuncEntryPoint(TrackerTarget_QueryInterface)); } TADDR ClrDataAccess::DACGetManagedObjectWrapperFromCCW(CLRDATA_ADDRESS ccwPtr) { if (!DACIsComWrappersCCW(ccwPtr)) { return NULL; } ULONG32 bytesRead = 0; TADDR managedObjectWrapperPtrPtr = ccwPtr & InteropLib::ABI::DispatchThisPtrMask; TADDR managedObjectWrapperPtr = 0; if (FAILED(m_pTarget->ReadVirtual(managedObjectWrapperPtrPtr, (PBYTE)&managedObjectWrapperPtr, sizeof(TADDR), &bytesRead)) || bytesRead != sizeof(TADDR)) { return NULL; } return managedObjectWrapperPtr; } HRESULT 
ClrDataAccess::DACTryGetComWrappersHandleFromCCW(CLRDATA_ADDRESS ccwPtr, OBJECTHANDLE* objHandle) { HRESULT hr = E_FAIL; TADDR ccw, managedObjectWrapperPtr; ULONG32 bytesRead = 0; OBJECTHANDLE handle; if (ccwPtr == 0 || objHandle == NULL) { hr = E_INVALIDARG; goto ErrExit; } if (!DACIsComWrappersCCW(ccwPtr)) { hr = E_FAIL; goto ErrExit; } ccw = CLRDATA_ADDRESS_TO_TADDR(ccwPtr); // Return ManagedObjectWrapper as an OBJECTHANDLE. (The OBJECTHANDLE is guaranteed to live at offset 0). managedObjectWrapperPtr = DACGetManagedObjectWrapperFromCCW(ccwPtr); if (managedObjectWrapperPtr == NULL) { hr = E_FAIL; goto ErrExit; } IfFailGo(m_pTarget->ReadVirtual(managedObjectWrapperPtr, (PBYTE)&handle, sizeof(OBJECTHANDLE), &bytesRead)); if (bytesRead != sizeof(OBJECTHANDLE)) { hr = E_FAIL; goto ErrExit; } *objHandle = handle; return S_OK; ErrExit: return hr; } HRESULT ClrDataAccess::DACTryGetComWrappersObjectFromCCW(CLRDATA_ADDRESS ccwPtr, OBJECTREF* objRef) { HRESULT hr = E_FAIL; if (ccwPtr == 0 || objRef == NULL) { hr = E_INVALIDARG; goto ErrExit; } OBJECTHANDLE handle; if (DACTryGetComWrappersHandleFromCCW(ccwPtr, &handle) != S_OK) { hr = E_FAIL; goto ErrExit; } *objRef = ObjectFromHandle(handle); return S_OK; ErrExit: return hr; } #endif HRESULT ClrDataAccess::GetCCWData(CLRDATA_ADDRESS ccw, struct DacpCCWData *ccwData) { if (ccw == 0 || ccwData == NULL) return E_INVALIDARG; #ifdef FEATURE_COMINTEROP SOSDacEnter(); ZeroMemory (ccwData, sizeof(DacpCCWData)); PTR_ComCallWrapper pCCW = DACGetCCWFromAddress(ccw); PTR_SimpleComCallWrapper pSimpleCCW = pCCW->GetSimpleWrapper(); ccwData->outerIUnknown = TO_CDADDR(pSimpleCCW->m_pOuter); ccwData->refCount = pSimpleCCW->GetRefCount(); ccwData->isNeutered = pSimpleCCW->IsNeutered(); ccwData->ccwAddress = TO_CDADDR(dac_cast<TADDR>(pCCW)); ccwData->hasStrongRef = pCCW->IsWrapperActive(); ccwData->handle = pCCW->GetObjectHandle(); ccwData->isExtendsCOMObject = pCCW->GetSimpleWrapper()->IsExtendsCOMObject(); ccwData->isAggregated = pCCW->GetSimpleWrapper()->IsAggregated(); if (pCCW->GetObjectHandle() != NULL) ccwData->managedObject = PTR_CDADDR(ObjectFromHandle(pCCW->GetObjectHandle())); // count the number of COM vtables ccwData->interfaceCount = 0; while (pCCW != NULL) { for (int i = 0; i < ComCallWrapper::NumVtablePtrs; i++) { if (DACGetCOMIPFromCCW(pCCW, i) != NULL) ccwData->interfaceCount++; } pCCW = ComCallWrapper::GetNext(pCCW); } SOSDacLeave(); return hr; #else return E_NOTIMPL; #endif } HRESULT ClrDataAccess::GetCCWInterfaces(CLRDATA_ADDRESS ccw, unsigned int count, struct DacpCOMInterfacePointerData interfaces[], unsigned int *pNeeded) { if (ccw == 0) return E_INVALIDARG; #ifdef FEATURE_COMINTEROP SOSDacEnter(); PTR_ComCallWrapper pCCW = DACGetCCWFromAddress(ccw); if (interfaces == NULL) { if (pNeeded) { unsigned int c = 0; while (pCCW != NULL) { for (int i = 0; i < ComCallWrapper::NumVtablePtrs; i++) if (DACGetCOMIPFromCCW(pCCW, i) != NULL) c++; pCCW = ComCallWrapper::GetNext(pCCW); } *pNeeded = c; } else { hr = E_INVALIDARG; } } else { ZeroMemory(interfaces, sizeof(DacpCOMInterfacePointerData) * count); PTR_ComCallWrapperTemplate pCCWTemplate = pCCW->GetSimpleWrapper()->GetComCallWrapperTemplate(); unsigned int itemIndex = 0; unsigned int wrapperOffset = 0; while (pCCW != NULL && SUCCEEDED(hr)) { for (int i = 0; i < ComCallWrapper::NumVtablePtrs && SUCCEEDED(hr); i++) { PTR_IUnknown pUnk = DACGetCOMIPFromCCW(pCCW, i); if (pUnk != NULL) { if (itemIndex >= count) { // the outBuffer is too small hr = E_INVALIDARG; break; } 
interfaces[itemIndex].interfacePtr = PTR_CDADDR(pUnk); // if this is the first ComCallWrapper, the 0th vtable slots is special if (wrapperOffset == 0 && i == ComCallWrapper::Slot_Basic) { // this is IDispatch/IUnknown interfaces[itemIndex].methodTable = NULL; } else { // this slot represents the class interface or an interface implemented by the class DWORD ifaceMapIndex = wrapperOffset + i - ComCallWrapper::Slot_FirstInterface; PTR_ComMethodTable pCMT = ComMethodTable::ComMethodTableFromIP(pUnk); interfaces[itemIndex].methodTable = PTR_CDADDR(pCMT->GetMethodTable()); } itemIndex++; } } pCCW = ComCallWrapper::GetNext(pCCW); wrapperOffset += ComCallWrapper::NumVtablePtrs; } if (SUCCEEDED(hr) && pNeeded) *pNeeded = itemIndex; } SOSDacLeave(); return hr; #else return E_NOTIMPL; #endif } HRESULT ClrDataAccess::GetObjectExceptionData(CLRDATA_ADDRESS objAddr, struct DacpExceptionObjectData *data) { if (data == NULL) return E_POINTER; SOSDacEnter(); PTR_ExceptionObject pObj = dac_cast<PTR_ExceptionObject>(TO_TADDR(objAddr)); data->Message = TO_CDADDR(dac_cast<TADDR>(pObj->GetMessage())); data->InnerException = TO_CDADDR(dac_cast<TADDR>(pObj->GetInnerException())); data->StackTrace = TO_CDADDR(dac_cast<TADDR>(pObj->GetStackTraceArrayObject())); data->WatsonBuckets = TO_CDADDR(dac_cast<TADDR>(pObj->GetWatsonBucketReference())); data->StackTraceString = TO_CDADDR(dac_cast<TADDR>(pObj->GetStackTraceString())); data->RemoteStackTraceString = TO_CDADDR(dac_cast<TADDR>(pObj->GetRemoteStackTraceString())); data->HResult = pObj->GetHResult(); data->XCode = pObj->GetXCode(); SOSDacLeave(); return hr; } HRESULT ClrDataAccess::IsRCWDCOMProxy(CLRDATA_ADDRESS rcwAddr, BOOL* isDCOMProxy) { if (isDCOMProxy == nullptr) { return E_POINTER; } *isDCOMProxy = FALSE; #ifdef FEATURE_COMINTEROP return S_OK; #else return E_NOTIMPL; #endif // FEATURE_COMINTEROP } HRESULT ClrDataAccess::GetClrNotification(CLRDATA_ADDRESS arguments[], int count, int *pNeeded) { SOSDacEnter(); *pNeeded = MAX_CLR_NOTIFICATION_ARGS; if (g_clrNotificationArguments[0] == NULL) { hr = E_FAIL; } else { for (int i = 0; i < count && i < MAX_CLR_NOTIFICATION_ARGS; i++) { arguments[i] = g_clrNotificationArguments[i]; } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetPendingReJITID(CLRDATA_ADDRESS methodDesc, int *pRejitId) { if (methodDesc == 0 || pRejitId == NULL) { return E_INVALIDARG; } SOSDacEnter(); *pRejitId = -1; PTR_MethodDesc pMD = PTR_MethodDesc(TO_TADDR(methodDesc)); CodeVersionManager* pCodeVersionManager = pMD->GetCodeVersionManager(); CodeVersionManager::LockHolder codeVersioningLockHolder; ILCodeVersion ilVersion = pCodeVersionManager->GetActiveILCodeVersion(pMD); if (ilVersion.IsNull()) { hr = E_INVALIDARG; } else if (ilVersion.GetRejitState() == ILCodeVersion::kStateRequested) { *pRejitId = (int)ilVersion.GetVersionId(); } else { hr = S_FALSE; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetReJITInformation(CLRDATA_ADDRESS methodDesc, int rejitId, struct DacpReJitData2 *pReJitData) { if (methodDesc == 0 || rejitId < 0 || pReJitData == NULL) { return E_INVALIDARG; } SOSDacEnter(); PTR_MethodDesc pMD = PTR_MethodDesc(TO_TADDR(methodDesc)); CodeVersionManager* pCodeVersionManager = pMD->GetCodeVersionManager(); CodeVersionManager::LockHolder codeVersioningLockHolder; ILCodeVersion ilVersion = pCodeVersionManager->GetILCodeVersion(pMD, rejitId); if (ilVersion.IsNull()) { hr = E_INVALIDARG; } else { pReJitData->rejitID = rejitId; switch (ilVersion.GetRejitState()) { default: _ASSERTE(!"Unknown SharedRejitInfo state. 
DAC should be updated to understand this new state."); pReJitData->flags = DacpReJitData2::kUnknown; break; case ILCodeVersion::kStateRequested: pReJitData->flags = DacpReJitData2::kRequested; break; case ILCodeVersion::kStateActive: pReJitData->flags = DacpReJitData2::kActive; break; } pReJitData->il = TO_CDADDR(PTR_TO_TADDR(ilVersion.GetIL())); PTR_ILCodeVersionNode nodePtr = ilVersion.IsDefaultVersion() ? NULL : ilVersion.AsNode(); pReJitData->ilCodeVersionNodePtr = TO_CDADDR(PTR_TO_TADDR(nodePtr)); } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetProfilerModifiedILInformation(CLRDATA_ADDRESS methodDesc, struct DacpProfilerILData *pILData) { if (methodDesc == 0 || pILData == NULL) { return E_INVALIDARG; } SOSDacEnter(); pILData->type = DacpProfilerILData::Unmodified; pILData->rejitID = 0; pILData->il = NULL; PTR_MethodDesc pMD = PTR_MethodDesc(TO_TADDR(methodDesc)); CodeVersionManager* pCodeVersionManager = pMD->GetCodeVersionManager(); CodeVersionManager::LockHolder codeVersioningLockHolder; ILCodeVersion ilVersion = pCodeVersionManager->GetActiveILCodeVersion(pMD); if (ilVersion.GetRejitState() != ILCodeVersion::kStateActive || !ilVersion.HasDefaultIL()) { pILData->type = DacpProfilerILData::ReJITModified; pILData->rejitID = static_cast<ULONG>(pCodeVersionManager->GetActiveILCodeVersion(pMD).GetVersionId()); } TADDR pDynamicIL = pMD->GetModule()->GetDynamicIL(pMD->GetMemberDef(), TRUE); if (pDynamicIL != NULL) { pILData->type = DacpProfilerILData::ILModified; pILData->il = (CLRDATA_ADDRESS)pDynamicIL; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetMethodsWithProfilerModifiedIL(CLRDATA_ADDRESS mod, CLRDATA_ADDRESS *methodDescs, int cMethodDescs, int *pcMethodDescs) { if (mod == 0 || methodDescs == NULL || cMethodDescs == 0 || pcMethodDescs == NULL) { return E_INVALIDARG; } SOSDacEnter(); *pcMethodDescs = 0; PTR_Module pModule = PTR_Module(TO_TADDR(mod)); CodeVersionManager* pCodeVersionManager = pModule->GetCodeVersionManager(); CodeVersionManager::LockHolder codeVersioningLockHolder; LookupMap<PTR_MethodTable>::Iterator typeIter(&pModule->m_TypeDefToMethodTableMap); for (int i = 0; typeIter.Next(); i++) { if (*pcMethodDescs >= cMethodDescs) { break; } if (typeIter.GetElement()) { MethodTable* pMT = typeIter.GetElement(); for (MethodTable::IntroducedMethodIterator itMethods(pMT, FALSE); itMethods.IsValid(); itMethods.Next()) { PTR_MethodDesc pMD = dac_cast<PTR_MethodDesc>(itMethods.GetMethodDesc()); TADDR pDynamicIL = pModule->GetDynamicIL(pMD->GetMemberDef(), TRUE); ILCodeVersion ilVersion = pCodeVersionManager->GetActiveILCodeVersion(pMD); if (ilVersion.GetRejitState() != ILCodeVersion::kStateActive || !ilVersion.HasDefaultIL() || pDynamicIL != NULL) { methodDescs[*pcMethodDescs] = PTR_CDADDR(pMD); ++(*pcMethodDescs); } if (*pcMethodDescs >= cMethodDescs) { break; } } } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetNumberGenerations(unsigned int *pGenerations) { if (pGenerations == NULL) { return E_INVALIDARG; } SOSDacEnter(); *pGenerations = (unsigned int)(g_gcDacGlobals->total_generation_count); SOSDacLeave(); return S_OK; } HRESULT ClrDataAccess::GetGenerationTable(unsigned int cGenerations, struct DacpGenerationData *pGenerationData, unsigned int *pNeeded) { if (cGenerations > 0 && pGenerationData == NULL) { return E_INVALIDARG; } SOSDacEnter(); HRESULT hr = S_OK; unsigned int numGenerationTableEntries = (unsigned int)(g_gcDacGlobals->total_generation_count); if (pNeeded != NULL) { *pNeeded = numGenerationTableEntries; } if (cGenerations < 
numGenerationTableEntries) { hr = S_FALSE; } else { if (g_gcDacGlobals->generation_table.IsValid()) { for (unsigned int i = 0; i < numGenerationTableEntries; i++) { dac_generation generation = GenerationTableIndex(g_gcDacGlobals->generation_table, i); pGenerationData[i].start_segment = (CLRDATA_ADDRESS) dac_cast<TADDR>(generation.start_segment); pGenerationData[i].allocation_start = (CLRDATA_ADDRESS) generation.allocation_start; gc_alloc_context alloc_context = generation.allocation_context; pGenerationData[i].allocContextPtr = (CLRDATA_ADDRESS)alloc_context.alloc_ptr; pGenerationData[i].allocContextLimit = (CLRDATA_ADDRESS)alloc_context.alloc_limit; } } else { hr = E_FAIL; } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetFinalizationFillPointers(unsigned int cFillPointers, CLRDATA_ADDRESS *pFinalizationFillPointers, unsigned int *pNeeded) { if (cFillPointers > 0 && pFinalizationFillPointers == NULL) { return E_INVALIDARG; } SOSDacEnter(); HRESULT hr = S_OK; unsigned int numFillPointers = (unsigned int)(g_gcDacGlobals->total_generation_count + dac_finalize_queue::ExtraSegCount); if (pNeeded != NULL) { *pNeeded = numFillPointers; } if (cFillPointers < numFillPointers) { hr = S_FALSE; } else { if (g_gcDacGlobals->finalize_queue.IsValid()) { DPTR(dac_finalize_queue) fq = Dereference(g_gcDacGlobals->finalize_queue); DPTR(uint8_t*) fillPointersTable = dac_cast<TADDR>(fq) + offsetof(dac_finalize_queue, m_FillPointers); for (unsigned int i = 0; i < numFillPointers; i++) { pFinalizationFillPointers[i] = (CLRDATA_ADDRESS)*TableIndex(fillPointersTable, i, sizeof(uint8_t*)); } } else { hr = E_FAIL; } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetGenerationTableSvr(CLRDATA_ADDRESS heapAddr, unsigned int cGenerations, struct DacpGenerationData *pGenerationData, unsigned int *pNeeded) { if (heapAddr == NULL || (cGenerations > 0 && pGenerationData == NULL)) { return E_INVALIDARG; } SOSDacEnter(); HRESULT hr = S_OK; #ifdef FEATURE_SVR_GC unsigned int numGenerationTableEntries = (unsigned int)(g_gcDacGlobals->total_generation_count); if (pNeeded != NULL) { *pNeeded = numGenerationTableEntries; } if (cGenerations < numGenerationTableEntries) { hr = S_FALSE; } else { TADDR heapAddress = TO_TADDR(heapAddr); if (heapAddress != 0) { for (unsigned int i = 0; i < numGenerationTableEntries; ++i) { dac_generation generation = ServerGenerationTableIndex(heapAddress, i); pGenerationData[i].start_segment = (CLRDATA_ADDRESS)dac_cast<TADDR>(generation.start_segment); pGenerationData[i].allocation_start = (CLRDATA_ADDRESS)(ULONG_PTR)generation.allocation_start; gc_alloc_context alloc_context = generation.allocation_context; pGenerationData[i].allocContextPtr = (CLRDATA_ADDRESS)(ULONG_PTR)alloc_context.alloc_ptr; pGenerationData[i].allocContextLimit = (CLRDATA_ADDRESS)(ULONG_PTR)alloc_context.alloc_limit; } } else { hr = E_FAIL; } } #else hr = E_NOTIMPL; #endif SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetFinalizationFillPointersSvr(CLRDATA_ADDRESS heapAddr, unsigned int cFillPointers, CLRDATA_ADDRESS *pFinalizationFillPointers, unsigned int *pNeeded) { if (heapAddr == NULL || (cFillPointers > 0 && pFinalizationFillPointers == NULL)) { return E_INVALIDARG; } SOSDacEnter(); HRESULT hr = S_OK; #ifdef FEATURE_SVR_GC unsigned int numFillPointers = (unsigned int)(g_gcDacGlobals->total_generation_count + dac_finalize_queue::ExtraSegCount); if (pNeeded != NULL) { *pNeeded = numFillPointers; } if (cFillPointers < numFillPointers) { hr = S_FALSE; } else { TADDR heapAddress = TO_TADDR(heapAddr); if 
(heapAddress != 0) { dac_gc_heap heap = LoadGcHeapData(heapAddress); dac_gc_heap* pHeap = &heap; DPTR(dac_finalize_queue) fq = pHeap->finalize_queue; DPTR(uint8_t*) pFillPointerArray= dac_cast<TADDR>(fq) + offsetof(dac_finalize_queue, m_FillPointers); for (unsigned int i = 0; i < numFillPointers; ++i) { pFinalizationFillPointers[i] = (CLRDATA_ADDRESS) pFillPointerArray[i]; } } else { hr = E_FAIL; } } #else hr = E_NOTIMPL; #endif SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetAssemblyLoadContext(CLRDATA_ADDRESS methodTable, CLRDATA_ADDRESS* assemblyLoadContext) { if (methodTable == 0 || assemblyLoadContext == NULL) return E_INVALIDARG; SOSDacEnter(); PTR_MethodTable pMT = PTR_MethodTable(CLRDATA_ADDRESS_TO_TADDR(methodTable)); PTR_Module pModule = pMT->GetModule(); PTR_PEAssembly pPEAssembly = pModule->GetPEAssembly(); PTR_AssemblyBinder pBinder = pPEAssembly->GetAssemblyBinder(); INT_PTR managedAssemblyLoadContextHandle = pBinder->GetManagedAssemblyLoadContext(); TADDR managedAssemblyLoadContextAddr = 0; if (managedAssemblyLoadContextHandle != 0) { DacReadAll(managedAssemblyLoadContextHandle,&managedAssemblyLoadContextAddr,sizeof(TADDR),true); } *assemblyLoadContext = TO_CDADDR(managedAssemblyLoadContextAddr); SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetBreakingChangeVersion(int* pVersion) { if (pVersion == nullptr) return E_INVALIDARG; *pVersion = SOS_BREAKING_CHANGE_VERSION; return S_OK; } HRESULT ClrDataAccess::GetObjectComWrappersData(CLRDATA_ADDRESS objAddr, CLRDATA_ADDRESS *rcw, unsigned int count, CLRDATA_ADDRESS *mowList, unsigned int *pNeeded) { #ifdef FEATURE_COMWRAPPERS if (objAddr == 0 ) { return E_INVALIDARG; } if (count > 0 && mowList == NULL) { return E_INVALIDARG; } SOSDacEnter(); if (pNeeded != NULL) { *pNeeded = 0; } if (rcw != NULL) { *rcw = 0; } PTR_SyncBlock pSyncBlk = PTR_Object(TO_TADDR(objAddr))->PassiveGetSyncBlock(); if (pSyncBlk != NULL) { PTR_InteropSyncBlockInfo pInfo = pSyncBlk->GetInteropInfoNoCreate(); if (pInfo != NULL) { if (rcw != NULL) { *rcw = TO_TADDR(pInfo->m_externalComObjectContext); } DPTR(NewHolder<ManagedObjectComWrapperByIdMap>) mapHolder(PTR_TO_MEMBER_TADDR(InteropSyncBlockInfo, pInfo, m_managedObjectComWrapperMap)); DPTR(ManagedObjectComWrapperByIdMap *)ppMap(PTR_TO_MEMBER_TADDR(NewHolder<ManagedObjectComWrapperByIdMap>, mapHolder, m_value)); DPTR(ManagedObjectComWrapperByIdMap) pMap(TO_TADDR(*ppMap)); CQuickArrayList<CLRDATA_ADDRESS> comWrappers; if (pMap != NULL) { ManagedObjectComWrapperByIdMap::Iterator iter = pMap->Begin(); while (iter != pMap->End()) { comWrappers.Push(TO_CDADDR(iter->Value())); ++iter; } } if (pNeeded != NULL) { *pNeeded = (unsigned int)comWrappers.Size(); } for (SIZE_T pos = 0; pos < comWrappers.Size(); ++pos) { if (pos >= count) { hr = S_FALSE; break; } mowList[pos] = comWrappers[pos]; } } else { hr = S_FALSE; } } else { hr = S_FALSE; } SOSDacLeave(); return hr; #else // FEATURE_COMWRAPPERS return E_NOTIMPL; #endif // FEATURE_COMWRAPPERS } HRESULT ClrDataAccess::IsComWrappersCCW(CLRDATA_ADDRESS ccw, BOOL *isComWrappersCCW) { #ifdef FEATURE_COMWRAPPERS if (ccw == 0) { return E_INVALIDARG; } SOSDacEnter(); if (isComWrappersCCW != NULL) { TADDR managedObjectWrapperPtr = DACGetManagedObjectWrapperFromCCW(ccw); *isComWrappersCCW = managedObjectWrapperPtr != NULL; hr = *isComWrappersCCW ? 
S_OK : S_FALSE; } SOSDacLeave(); return hr; #else // FEATURE_COMWRAPPERS return E_NOTIMPL; #endif // FEATURE_COMWRAPPERS } HRESULT ClrDataAccess::GetComWrappersCCWData(CLRDATA_ADDRESS ccw, CLRDATA_ADDRESS *managedObject, int *refCount) { #ifdef FEATURE_COMWRAPPERS if (ccw == 0) { return E_INVALIDARG; } SOSDacEnter(); TADDR managedObjectWrapperPtr = DACGetManagedObjectWrapperFromCCW(ccw); if (managedObjectWrapperPtr != NULL) { PTR_ManagedObjectWrapper pMOW(managedObjectWrapperPtr); if (managedObject != NULL) { OBJECTREF managedObjectRef; if (SUCCEEDED(DACTryGetComWrappersObjectFromCCW(ccw, &managedObjectRef))) { *managedObject = PTR_HOST_TO_TADDR(managedObjectRef); } else { *managedObject = 0; } } if (refCount != NULL) { *refCount = (int)pMOW->RefCount; } } else { // Not a ComWrappers CCW hr = E_INVALIDARG; } SOSDacLeave(); return hr; #else // FEATURE_COMWRAPPERS return E_NOTIMPL; #endif // FEATURE_COMWRAPPERS } HRESULT ClrDataAccess::IsComWrappersRCW(CLRDATA_ADDRESS rcw, BOOL *isComWrappersRCW) { #ifdef FEATURE_COMWRAPPERS if (rcw == 0) { return E_INVALIDARG; } SOSDacEnter(); if (isComWrappersRCW != NULL) { PTR_ExternalObjectContext pRCW(TO_TADDR(rcw)); BOOL stillValid = TRUE; if(pRCW->SyncBlockIndex >= SyncBlockCache::s_pSyncBlockCache->m_SyncTableSize) { stillValid = FALSE; } PTR_SyncBlock pSyncBlk = NULL; if (stillValid) { PTR_SyncTableEntry ste = PTR_SyncTableEntry(dac_cast<TADDR>(g_pSyncTable) + (sizeof(SyncTableEntry) * pRCW->SyncBlockIndex)); pSyncBlk = ste->m_SyncBlock; if(pSyncBlk == NULL) { stillValid = FALSE; } } PTR_InteropSyncBlockInfo pInfo = NULL; if (stillValid) { pInfo = pSyncBlk->GetInteropInfoNoCreate(); if(pInfo == NULL) { stillValid = FALSE; } } if (stillValid) { stillValid = TO_TADDR(pInfo->m_externalComObjectContext) == PTR_HOST_TO_TADDR(pRCW); } *isComWrappersRCW = stillValid; hr = *isComWrappersRCW ? 
S_OK : S_FALSE; } SOSDacLeave(); return hr; #else // FEATURE_COMWRAPPERS return E_NOTIMPL; #endif // FEATURE_COMWRAPPERS } HRESULT ClrDataAccess::GetComWrappersRCWData(CLRDATA_ADDRESS rcw, CLRDATA_ADDRESS *identity) { #ifdef FEATURE_COMWRAPPERS if (rcw == 0) { return E_INVALIDARG; } SOSDacEnter(); PTR_ExternalObjectContext pEOC(TO_TADDR(rcw)); if (identity != NULL) { *identity = PTR_CDADDR(pEOC->Identity); } SOSDacLeave(); return hr; #else // FEATURE_COMWRAPPERS return E_NOTIMPL; #endif // FEATURE_COMWRAPPERS } namespace { BOOL TryReadTaggedMemoryState( CLRDATA_ADDRESS objAddr, ICorDebugDataTarget* target, CLRDATA_ADDRESS *taggedMemory = NULL, size_t *taggedMemorySizeInBytes = NULL) { BOOL hasTaggedMemory = FALSE; #ifdef FEATURE_OBJCMARSHAL EX_TRY_ALLOW_DATATARGET_MISSING_MEMORY { PTR_SyncBlock pSyncBlk = DACGetSyncBlockFromObjectPointer(CLRDATA_ADDRESS_TO_TADDR(objAddr), target); if (pSyncBlk != NULL) { PTR_InteropSyncBlockInfo pInfo = pSyncBlk->GetInteropInfoNoCreate(); if (pInfo != NULL) { CLRDATA_ADDRESS taggedMemoryLocal = PTR_CDADDR(pInfo->GetTaggedMemory()); if (taggedMemoryLocal != NULL) { hasTaggedMemory = TRUE; if (taggedMemory) *taggedMemory = taggedMemoryLocal; if (taggedMemorySizeInBytes) *taggedMemorySizeInBytes = pInfo->GetTaggedMemorySizeInBytes(); } } } } EX_END_CATCH_ALLOW_DATATARGET_MISSING_MEMORY; #endif // FEATURE_OBJCMARSHAL return hasTaggedMemory; } } HRESULT ClrDataAccess::IsTrackedType( CLRDATA_ADDRESS objAddr, BOOL *isTrackedType, BOOL *hasTaggedMemory) { if (objAddr == 0 || isTrackedType == NULL || hasTaggedMemory == NULL) { return E_INVALIDARG; } *isTrackedType = FALSE; *hasTaggedMemory = FALSE; SOSDacEnter(); TADDR mtTADDR = DACGetMethodTableFromObjectPointer(CLRDATA_ADDRESS_TO_TADDR(objAddr), m_pTarget); if (mtTADDR==NULL) hr = E_INVALIDARG; BOOL bFree = FALSE; MethodTable *mt = NULL; if (SUCCEEDED(hr)) { mt = PTR_MethodTable(mtTADDR); if (!DacValidateMethodTable(mt, bFree)) hr = E_INVALIDARG; } if (SUCCEEDED(hr)) { *isTrackedType = mt->IsTrackedReferenceWithFinalizer(); hr = *isTrackedType ? S_OK : S_FALSE; *hasTaggedMemory = TryReadTaggedMemoryState(objAddr, m_pTarget); } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetTaggedMemory( CLRDATA_ADDRESS objAddr, CLRDATA_ADDRESS *taggedMemory, size_t *taggedMemorySizeInBytes) { if (objAddr == 0 || taggedMemory == NULL || taggedMemorySizeInBytes == NULL) { return E_INVALIDARG; } *taggedMemory = NULL; *taggedMemorySizeInBytes = 0; SOSDacEnter(); if (FALSE == TryReadTaggedMemoryState(objAddr, m_pTarget, taggedMemory, taggedMemorySizeInBytes)) { hr = S_FALSE; } SOSDacLeave(); return hr; }
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

//*****************************************************************************
// File: request.cpp
//
//
// CorDataAccess::Request implementation.
//
//*****************************************************************************

#include "stdafx.h"
#include <win32threadpool.h>

#include "typestring.h"
#include <gccover.h>
#include <virtualcallstub.h>

#ifdef FEATURE_COMINTEROP
#include <comcallablewrapper.h>
#endif // FEATURE_COMINTEROP

#ifdef FEATURE_COMWRAPPERS
#include <interoplibinterface.h>
#include <interoplibabi.h>

typedef DPTR(InteropLibInterface::ExternalObjectContextBase) PTR_ExternalObjectContext;
typedef DPTR(InteropLib::ABI::ManagedObjectWrapperLayout) PTR_ManagedObjectWrapper;
#endif // FEATURE_COMWRAPPERS

#ifndef TARGET_UNIX
// It is unfortunate having to include this header just to get the definition of GenericModeBlock
#include <msodw.h>
#endif // TARGET_UNIX

// To include definition of IsThrowableThreadAbortException
#include <exstatecommon.h>

#include "rejit.h"
#include "request_common.h"

// GC headers define these to EE-specific stuff that we don't want.
#undef EnterCriticalSection
#undef LeaveCriticalSection

#define PTR_CDADDR(ptr)   TO_CDADDR(PTR_TO_TADDR(ptr))
#define HOST_CDADDR(host) TO_CDADDR(PTR_HOST_TO_TADDR(host))

#define SOSDacEnter()   \
    DAC_ENTER();        \
    HRESULT hr = S_OK;  \
    EX_TRY              \
    {

#define SOSDacLeave()   \
    }                   \
    EX_CATCH            \
    {                   \
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &hr)) \
        {               \
            EX_RETHROW; \
        }               \
    }                   \
    EX_END_CATCH(SwallowAllExceptions) \
    DAC_LEAVE();

// Use this when you don't want to instantiate an Object * in the host.
TADDR DACGetMethodTableFromObjectPointer(TADDR objAddr, ICorDebugDataTarget * target)
{
    ULONG32 returned = 0;
    TADDR Value = NULL;

    HRESULT hr = target->ReadVirtual(objAddr, (PBYTE)&Value, sizeof(TADDR), &returned);

    if ((hr != S_OK) || (returned != sizeof(TADDR)))
    {
        return NULL;
    }

#if TARGET_64BIT
    Value = Value & ~7; // equivalent to Object::GetGCSafeMethodTable()
#else
    Value = Value & ~3; // equivalent to Object::GetGCSafeMethodTable()
#endif
    return Value;
}

// Use this when you don't want to instantiate an Object * in the host.
PTR_SyncBlock DACGetSyncBlockFromObjectPointer(TADDR objAddr, ICorDebugDataTarget * target)
{
    ULONG32 returned = 0;
    DWORD Value = NULL;

    HRESULT hr = target->ReadVirtual(objAddr - sizeof(DWORD), (PBYTE)&Value, sizeof(DWORD), &returned);

    if ((hr != S_OK) || (returned != sizeof(DWORD)))
    {
        return NULL;
    }

    if ((Value & (BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_IS_HASHCODE)) != BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX)
        return NULL;

    Value &= MASK_SYNCBLOCKINDEX;

    PTR_SyncTableEntry ste = PTR_SyncTableEntry(dac_cast<TADDR>(g_pSyncTable) + (sizeof(SyncTableEntry) * Value));
    return ste->m_SyncBlock;
}

BOOL DacValidateEEClass(EEClass *pEEClass)
{
    // Verify things are right.
    // The EEClass method table pointer should match the method table.
    // TODO: Microsoft, need another test for validity, this one isn't always true anymore.
    BOOL retval = TRUE;
    EX_TRY
    {
        MethodTable *pMethodTable = pEEClass->GetMethodTable();
        if (!pMethodTable)
        {
            // PREfix.
            retval = FALSE;
        }
        else if (pEEClass != pMethodTable->GetClass())
        {
            retval = FALSE;
        }
    }
    EX_CATCH
    {
        retval = FALSE; // Something is wrong
    }
    EX_END_CATCH(SwallowAllExceptions)
    return retval;
}

BOOL DacValidateMethodTable(MethodTable *pMT, BOOL &bIsFree)
{
    // Verify things are right.
BOOL retval = FALSE; EX_TRY { bIsFree = FALSE; if (HOST_CDADDR(pMT) == HOST_CDADDR(g_pFreeObjectMethodTable)) { bIsFree = TRUE; } else { // Standard fast check if (!pMT->ValidateWithPossibleAV()) goto BadMethodTable; // In rare cases, we've seen the standard check above pass when it shouldn't. // Insert additional/ad-hoc tests below. // Metadata token should look valid for a class mdTypeDef td = pMT->GetCl(); if (td != mdTokenNil && TypeFromToken(td) != mdtTypeDef) goto BadMethodTable; // BaseSize should always be greater than 0 for valid objects (unless it's an interface) // For strings, baseSize is not ptr-aligned if (!pMT->IsInterface() && !pMT->IsString()) { if (pMT->GetBaseSize() == 0 || !IS_ALIGNED(pMT->GetBaseSize(), sizeof(void *))) goto BadMethodTable; } } retval = TRUE; BadMethodTable: ; } EX_CATCH { retval = FALSE; // Something is wrong } EX_END_CATCH(SwallowAllExceptions) return retval; } BOOL DacValidateMD(MethodDesc * pMD) { if (pMD == NULL) { return FALSE; } // Verify things are right. BOOL retval = TRUE; EX_TRY { MethodTable *pMethodTable = pMD->GetMethodTable(); // Standard fast check if (!pMethodTable->ValidateWithPossibleAV()) { retval = FALSE; } if (retval && (pMD->GetSlot() >= pMethodTable->GetNumVtableSlots() && !pMD->HasNonVtableSlot())) { retval = FALSE; } if (retval) { MethodDesc *pMDCheck = MethodDesc::GetMethodDescFromStubAddr(pMD->GetTemporaryEntryPoint(), TRUE); if (PTR_HOST_TO_TADDR(pMD) != PTR_HOST_TO_TADDR(pMDCheck)) { retval = FALSE; } } if (retval && pMD->HasNativeCode() && !pMD->IsFCall()) { PCODE jitCodeAddr = pMD->GetNativeCode(); MethodDesc *pMDCheck = ExecutionManager::GetCodeMethodDesc(jitCodeAddr); if (pMDCheck) { // Check that the given MethodDesc matches the MethodDesc from // the CodeHeader if (PTR_HOST_TO_TADDR(pMD) != PTR_HOST_TO_TADDR(pMDCheck)) { retval = FALSE; } } else { retval = FALSE; } } } EX_CATCH { retval = FALSE; // Something is wrong } EX_END_CATCH(SwallowAllExceptions) return retval; } BOOL DacValidateMD(LPCVOID pMD) { return DacValidateMD((MethodDesc *)pMD); } VOID GetJITMethodInfo (EECodeInfo * pCodeInfo, JITTypes *pJITType, CLRDATA_ADDRESS *pGCInfo) { DWORD dwType = pCodeInfo->GetJitManager()->GetCodeType(); if (IsMiIL(dwType)) { *pJITType = TYPE_JIT; } else if (IsMiNative(dwType)) { *pJITType = TYPE_PJIT; } else { *pJITType = TYPE_UNKNOWN; } *pGCInfo = (CLRDATA_ADDRESS)PTR_TO_TADDR(pCodeInfo->GetGCInfo()); } HRESULT ClrDataAccess::GetWorkRequestData(CLRDATA_ADDRESS addr, struct DacpWorkRequestData *workRequestData) { if (addr == 0 || workRequestData == NULL) return E_INVALIDARG; SOSDacEnter(); WorkRequest *pRequest = PTR_WorkRequest(TO_TADDR(addr)); workRequestData->Function = (TADDR)(pRequest->Function); workRequestData->Context = (TADDR)(pRequest->Context); workRequestData->NextWorkRequest = (TADDR)(pRequest->next); SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetHillClimbingLogEntry(CLRDATA_ADDRESS addr, struct DacpHillClimbingLogEntry *entry) { if (addr == 0 || entry == NULL) return E_INVALIDARG; SOSDacEnter(); HillClimbingLogEntry *pLogEntry = PTR_HillClimbingLogEntry(TO_TADDR(addr)); entry->TickCount = pLogEntry->TickCount; entry->NewControlSetting = pLogEntry->NewControlSetting; entry->LastHistoryCount = pLogEntry->LastHistoryCount; entry->LastHistoryMean = pLogEntry->LastHistoryMean; entry->Transition = pLogEntry->Transition; SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetThreadpoolData(struct DacpThreadpoolData *threadpoolData) { if (threadpoolData == NULL) return E_INVALIDARG; SOSDacEnter(); 
threadpoolData->cpuUtilization = ThreadpoolMgr::cpuUtilization; threadpoolData->MinLimitTotalWorkerThreads = ThreadpoolMgr::MinLimitTotalWorkerThreads; threadpoolData->MaxLimitTotalWorkerThreads = ThreadpoolMgr::MaxLimitTotalWorkerThreads; // // Read ThreadpoolMgr::WorkerCounter // TADDR pCounter = DacGetTargetAddrForHostAddr(&ThreadpoolMgr::WorkerCounter,true); ThreadpoolMgr::ThreadCounter counter; DacReadAll(pCounter,&counter,sizeof(ThreadpoolMgr::ThreadCounter),true); ThreadpoolMgr::ThreadCounter::Counts counts = counter.counts; threadpoolData->NumWorkingWorkerThreads = counts.NumWorking; threadpoolData->NumIdleWorkerThreads = counts.NumActive - counts.NumWorking; threadpoolData->NumRetiredWorkerThreads = counts.NumRetired; threadpoolData->FirstUnmanagedWorkRequest = HOST_CDADDR(ThreadpoolMgr::WorkRequestHead); threadpoolData->HillClimbingLog = dac_cast<TADDR>(&HillClimbingLog); threadpoolData->HillClimbingLogFirstIndex = HillClimbingLogFirstIndex; threadpoolData->HillClimbingLogSize = HillClimbingLogSize; // // Read ThreadpoolMgr::CPThreadCounter // pCounter = DacGetTargetAddrForHostAddr(&ThreadpoolMgr::CPThreadCounter,true); DacReadAll(pCounter,&counter,sizeof(ThreadpoolMgr::ThreadCounter),true); counts = counter.counts; threadpoolData->NumCPThreads = (LONG)(counts.NumActive + counts.NumRetired); threadpoolData->NumFreeCPThreads = (LONG)(counts.NumActive - counts.NumWorking); threadpoolData->MaxFreeCPThreads = ThreadpoolMgr::MaxFreeCPThreads; threadpoolData->NumRetiredCPThreads = (LONG)(counts.NumRetired); threadpoolData->MaxLimitTotalCPThreads = ThreadpoolMgr::MaxLimitTotalCPThreads; threadpoolData->CurrentLimitTotalCPThreads = (LONG)(counts.NumActive); //legacy: currently has no meaning threadpoolData->MinLimitTotalCPThreads = ThreadpoolMgr::MinLimitTotalCPThreads; TADDR pEntry = DacGetTargetAddrForHostAddr(&ThreadpoolMgr::TimerQueue,true); ThreadpoolMgr::LIST_ENTRY entry; DacReadAll(pEntry,&entry,sizeof(ThreadpoolMgr::LIST_ENTRY),true); TADDR node = (TADDR) entry.Flink; threadpoolData->NumTimers = 0; while (node && node != pEntry) { threadpoolData->NumTimers++; DacReadAll(node,&entry,sizeof(ThreadpoolMgr::LIST_ENTRY),true); node = (TADDR) entry.Flink; } threadpoolData->AsyncTimerCallbackCompletionFPtr = (CLRDATA_ADDRESS) GFN_TADDR(ThreadpoolMgr__AsyncTimerCallbackCompletion); SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetThreadStoreData(struct DacpThreadStoreData *threadStoreData) { SOSDacEnter(); ThreadStore* threadStore = ThreadStore::s_pThreadStore; if (!threadStore) { hr = E_UNEXPECTED; } else { // initialize the fields of our local structure threadStoreData->threadCount = threadStore->m_ThreadCount; threadStoreData->unstartedThreadCount = threadStore->m_UnstartedThreadCount; threadStoreData->backgroundThreadCount = threadStore->m_BackgroundThreadCount; threadStoreData->pendingThreadCount = threadStore->m_PendingThreadCount; threadStoreData->deadThreadCount = threadStore->m_DeadThreadCount; threadStoreData->fHostConfig = FALSE; // identify the "important" threads threadStoreData->firstThread = HOST_CDADDR(threadStore->m_ThreadList.GetHead()); threadStoreData->finalizerThread = HOST_CDADDR(g_pFinalizerThread); threadStoreData->gcThread = HOST_CDADDR(g_pSuspensionThread); } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetStressLogAddress(CLRDATA_ADDRESS *stressLog) { if (stressLog == NULL) return E_INVALIDARG; #ifdef STRESS_LOG SOSDacEnter(); if (g_pStressLog.IsValid()) *stressLog = HOST_CDADDR(g_pStressLog); else hr = E_FAIL; SOSDacLeave(); return hr; #else return 
E_NOTIMPL; #endif // STRESS_LOG } HRESULT ClrDataAccess::GetJitManagerList(unsigned int count, struct DacpJitManagerInfo managers[], unsigned int *pNeeded) { SOSDacEnter(); if (managers) { if (count >= 1) { EEJitManager * managerPtr = ExecutionManager::GetEEJitManager(); DacpJitManagerInfo *currentPtr = &managers[0]; currentPtr->managerAddr = HOST_CDADDR(managerPtr); currentPtr->codeType = managerPtr->GetCodeType(); EEJitManager *eeJitManager = PTR_EEJitManager(PTR_HOST_TO_TADDR(managerPtr)); currentPtr->ptrHeapList = HOST_CDADDR(eeJitManager->m_pCodeHeap); } } else if (pNeeded) { *pNeeded = 1; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetMethodTableSlot(CLRDATA_ADDRESS mt, unsigned int slot, CLRDATA_ADDRESS *value) { if (mt == 0 || value == NULL) return E_INVALIDARG; SOSDacEnter(); MethodTable* mTable = PTR_MethodTable(TO_TADDR(mt)); BOOL bIsFree = FALSE; if (!DacValidateMethodTable(mTable, bIsFree)) { hr = E_INVALIDARG; } else if (slot < mTable->GetNumVtableSlots()) { // Now get the slot: *value = mTable->GetRestoredSlot(slot); } else { hr = E_INVALIDARG; MethodTable::IntroducedMethodIterator it(mTable); for (; it.IsValid() && FAILED(hr); it.Next()) { MethodDesc * pMD = it.GetMethodDesc(); if (pMD->GetSlot() == slot) { *value = pMD->GetMethodEntryPoint(); hr = S_OK; } } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetCodeHeapList(CLRDATA_ADDRESS jitManager, unsigned int count, struct DacpJitCodeHeapInfo codeHeaps[], unsigned int *pNeeded) { if (jitManager == NULL) return E_INVALIDARG; SOSDacEnter(); EEJitManager *pJitManager = PTR_EEJitManager(TO_TADDR(jitManager)); HeapList *heapList = pJitManager->m_pCodeHeap; if (codeHeaps) { unsigned int i = 0; while ((heapList != NULL) && (i < count)) { CodeHeap *codeHeap = heapList->pHeap; codeHeaps[i] = DACGetHeapInfoForCodeHeap(codeHeap); heapList = heapList->hpNext; i++; } if (pNeeded) *pNeeded = i; } else if (pNeeded) { int i = 0; while (heapList != NULL) { heapList = heapList->hpNext; i++; } *pNeeded = i; } else { hr = E_INVALIDARG; } SOSDacLeave(); return hr; } DacpJitCodeHeapInfo ClrDataAccess::DACGetHeapInfoForCodeHeap(CodeHeap *heapAddr) { DacpJitCodeHeapInfo jitCodeHeapInfo; TADDR targetVtblPtrForHeapType = VPTR_HOST_VTABLE_TO_TADDR(*(LPVOID*)heapAddr); if (targetVtblPtrForHeapType == LoaderCodeHeap::VPtrTargetVTable()) { LoaderCodeHeap *loaderCodeHeap = PTR_LoaderCodeHeap(PTR_HOST_TO_TADDR(heapAddr)); jitCodeHeapInfo.codeHeapType = CODEHEAP_LOADER; jitCodeHeapInfo.LoaderHeap = TO_CDADDR(PTR_HOST_MEMBER_TADDR(LoaderCodeHeap, loaderCodeHeap, m_LoaderHeap)); } else if (targetVtblPtrForHeapType == HostCodeHeap::VPtrTargetVTable()) { HostCodeHeap *hostCodeHeap = PTR_HostCodeHeap(PTR_HOST_TO_TADDR(heapAddr)); jitCodeHeapInfo.codeHeapType = CODEHEAP_HOST; jitCodeHeapInfo.HostData.baseAddr = PTR_CDADDR(hostCodeHeap->m_pBaseAddr); jitCodeHeapInfo.HostData.currentAddr = PTR_CDADDR(hostCodeHeap->m_pLastAvailableCommittedAddr); } else { jitCodeHeapInfo.codeHeapType = CODEHEAP_UNKNOWN; } return jitCodeHeapInfo; } HRESULT ClrDataAccess::GetStackLimits(CLRDATA_ADDRESS threadPtr, CLRDATA_ADDRESS *lower, CLRDATA_ADDRESS *upper, CLRDATA_ADDRESS *fp) { if (threadPtr == 0 || (lower == NULL && upper == NULL && fp == NULL)) return E_INVALIDARG; SOSDacEnter(); Thread * thread = PTR_Thread(TO_TADDR(threadPtr)); if (lower) *lower = TO_CDADDR(thread->GetCachedStackBase().GetAddr()); if (upper) *upper = TO_CDADDR(thread->GetCachedStackLimit().GetAddr()); if (fp) *fp = PTR_HOST_MEMBER_TADDR(Thread, thread, m_pFrame); SOSDacLeave(); return hr; } 
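// GetRegisterName maps a context register index to its architecture-specific name
// ("rax" on AMD64, "r0" on ARM, "X0" on ARM64, "eax" on x86). Negative indices use
// the "-(reg+1)" encoding for caller-frame registers and are reported with a
// "caller." prefix. *pNeeded receives the required character count (including the
// terminator); S_FALSE is returned when the supplied buffer is too small.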
HRESULT ClrDataAccess::GetRegisterName(int regNum, unsigned int count, _Inout_updates_z_(count) WCHAR *buffer, unsigned int *pNeeded) { if (!buffer && !pNeeded) return E_POINTER; #ifdef TARGET_AMD64 static const WCHAR *regs[] = { W("rax"), W("rcx"), W("rdx"), W("rbx"), W("rsp"), W("rbp"), W("rsi"), W("rdi"), W("r8"), W("r9"), W("r10"), W("r11"), W("r12"), W("r13"), W("r14"), W("r15"), }; #elif defined(TARGET_ARM) static const WCHAR *regs[] = { W("r0"), W("r1"), W("r2"), W("r3"), W("r4"), W("r5"), W("r6"), W("r7"), W("r8"), W("r9"), W("r10"), W("r11"), W("r12"), W("sp"), W("lr") }; #elif defined(TARGET_ARM64) static const WCHAR *regs[] = { W("X0"), W("X1"), W("X2"), W("X3"), W("X4"), W("X5"), W("X6"), W("X7"), W("X8"), W("X9"), W("X10"), W("X11"), W("X12"), W("X13"), W("X14"), W("X15"), W("X16"), W("X17"), W("X18"), W("X19"), W("X20"), W("X21"), W("X22"), W("X23"), W("X24"), W("X25"), W("X26"), W("X27"), W("X28"), W("Fp"), W("Lr"), W("Sp") }; #elif defined(TARGET_X86) static const WCHAR *regs[] = { W("eax"), W("ecx"), W("edx"), W("ebx"), W("esp"), W("ebp"), W("esi"), W("edi"), }; #endif // Caller frame registers are encoded as "-(reg+1)". bool callerFrame = regNum < 0; if (callerFrame) regNum = -regNum-1; if ((unsigned int)regNum >= ARRAY_SIZE(regs)) return E_UNEXPECTED; const WCHAR caller[] = W("caller."); unsigned int needed = (callerFrame?(unsigned int)wcslen(caller):0) + (unsigned int)wcslen(regs[regNum]) + 1; if (pNeeded) *pNeeded = needed; if (buffer) { _snwprintf_s(buffer, count, _TRUNCATE, W("%s%s"), callerFrame ? caller : W(""), regs[regNum]); if (count < needed) return S_FALSE; } return S_OK; } HRESULT ClrDataAccess::GetStackReferences(DWORD osThreadID, ISOSStackRefEnum **ppEnum) { if (ppEnum == NULL) return E_POINTER; SOSDacEnter(); DacStackReferenceWalker *walker = new (nothrow) DacStackReferenceWalker(this, osThreadID); if (walker == NULL) { hr = E_OUTOFMEMORY; } else { hr = walker->Init(); if (SUCCEEDED(hr)) hr = walker->QueryInterface(__uuidof(ISOSStackRefEnum), (void**)ppEnum); if (FAILED(hr)) { delete walker; *ppEnum = NULL; } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetThreadFromThinlockID(UINT thinLockId, CLRDATA_ADDRESS *pThread) { if (pThread == NULL) return E_INVALIDARG; SOSDacEnter(); Thread *thread = g_pThinLockThreadIdDispenser->IdToThread(thinLockId); *pThread = PTR_HOST_TO_TADDR(thread); SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetThreadAllocData(CLRDATA_ADDRESS addr, struct DacpAllocData *data) { if (data == NULL) return E_POINTER; SOSDacEnter(); Thread* thread = PTR_Thread(TO_TADDR(addr)); data->allocBytes = TO_CDADDR(thread->m_alloc_context.alloc_bytes); data->allocBytesLoh = TO_CDADDR(thread->m_alloc_context.alloc_bytes_uoh); SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetHeapAllocData(unsigned int count, struct DacpGenerationAllocData *data, unsigned int *pNeeded) { if (data == 0 && pNeeded == NULL) return E_INVALIDARG; SOSDacEnter(); #if defined(FEATURE_SVR_GC) if (GCHeapUtilities::IsServerHeap()) { hr = GetServerAllocData(count, data, pNeeded); } else #endif //FEATURE_SVR_GC { if (pNeeded) *pNeeded = 1; if (data && count >= 1) { DPTR(unused_generation) table = g_gcDacGlobals->generation_table; for (unsigned int i=0; i < *g_gcDacGlobals->max_gen + 2; i++) { dac_generation entry = GenerationTableIndex(table, i); data[0].allocData[i].allocBytes = (CLRDATA_ADDRESS)(ULONG_PTR) entry.allocation_context.alloc_bytes; data[0].allocData[i].allocBytesLoh = (CLRDATA_ADDRESS)(ULONG_PTR) entry.allocation_context.alloc_bytes_uoh; } } } 
SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetThreadData(CLRDATA_ADDRESS threadAddr, struct DacpThreadData *threadData) { SOSDacEnter(); // marshal the Thread object from the target Thread* thread = PTR_Thread(TO_TADDR(threadAddr)); // initialize our local copy from the marshaled target Thread instance ZeroMemory (threadData, sizeof(DacpThreadData)); threadData->corThreadId = thread->m_ThreadId; threadData->osThreadId = (DWORD)thread->m_OSThreadId; threadData->state = thread->m_State; threadData->preemptiveGCDisabled = thread->m_fPreemptiveGCDisabled; threadData->allocContextPtr = TO_CDADDR(thread->m_alloc_context.alloc_ptr); threadData->allocContextLimit = TO_CDADDR(thread->m_alloc_context.alloc_limit); threadData->fiberData = NULL; threadData->pFrame = PTR_CDADDR(thread->m_pFrame); threadData->context = PTR_CDADDR(thread->m_pDomain); threadData->domain = PTR_CDADDR(thread->m_pDomain); threadData->lockCount = (DWORD)-1; #ifndef TARGET_UNIX threadData->teb = TO_CDADDR(thread->m_pTEB); #else threadData->teb = NULL; #endif threadData->lastThrownObjectHandle = TO_CDADDR(thread->m_LastThrownObjectHandle); threadData->nextThread = HOST_CDADDR(ThreadStore::s_pThreadStore->m_ThreadList.GetNext(thread)); #ifdef FEATURE_EH_FUNCLETS if (thread->m_ExceptionState.m_pCurrentTracker) { threadData->firstNestedException = PTR_HOST_TO_TADDR( thread->m_ExceptionState.m_pCurrentTracker->m_pPrevNestedInfo); } #else threadData->firstNestedException = PTR_HOST_TO_TADDR( thread->m_ExceptionState.m_currentExInfo.m_pPrevNestedInfo); #endif // FEATURE_EH_FUNCLETS SOSDacLeave(); return hr; } #ifdef FEATURE_REJIT void CopyNativeCodeVersionToReJitData(NativeCodeVersion nativeCodeVersion, NativeCodeVersion activeCodeVersion, DacpReJitData * pReJitData) { pReJitData->rejitID = nativeCodeVersion.GetILCodeVersion().GetVersionId(); pReJitData->NativeCodeAddr = nativeCodeVersion.GetNativeCode(); if (nativeCodeVersion != activeCodeVersion) { pReJitData->flags = DacpReJitData::kReverted; } else { switch (nativeCodeVersion.GetILCodeVersion().GetRejitState()) { default: _ASSERTE(!"Unknown SharedRejitInfo state. DAC should be updated to understand this new state."); pReJitData->flags = DacpReJitData::kUnknown; break; case ILCodeVersion::kStateRequested: pReJitData->flags = DacpReJitData::kRequested; break; case ILCodeVersion::kStateActive: pReJitData->flags = DacpReJitData::kActive; break; } } } #endif // FEATURE_REJIT //--------------------------------------------------------------------------------------- // // Given a method desc addr, this loads up DacpMethodDescData and multiple DacpReJitDatas // with data on that method // // Arguments: // * methodDesc - MD to look up // * ip - IP address of interest (e.g., from an !ip2md call). This is used to ensure // the rejitted version corresponding to this IP is returned. May be NULL if you // don't care. // * methodDescData - [out] DacpMethodDescData to populate // * cRevertedRejitVersions - Number of entries allocated in rgRevertedRejitData // array // * rgRevertedRejitData - [out] Array of DacpReJitDatas to populate with rejitted // rejit version data // * pcNeededRevertedRejitData - [out] If cRevertedRejitVersions==0, the total // number of available rejit versions (including the current version) is // returned here. Else, the number of reverted rejit data actually fetched is // returned here. // // Return Value: // HRESULT indicating success or failure. 
// HRESULT ClrDataAccess::GetMethodDescData( CLRDATA_ADDRESS methodDesc, CLRDATA_ADDRESS ip, struct DacpMethodDescData *methodDescData, ULONG cRevertedRejitVersions, DacpReJitData * rgRevertedRejitData, ULONG * pcNeededRevertedRejitData) { if (methodDesc == 0) return E_INVALIDARG; if ((cRevertedRejitVersions != 0) && (rgRevertedRejitData == NULL)) { return E_INVALIDARG; } if ((rgRevertedRejitData != NULL) && (pcNeededRevertedRejitData == NULL)) { // If you're asking for reverted rejit data, you'd better ask for the number of // elements we return return E_INVALIDARG; } SOSDacEnter(); PTR_MethodDesc pMD = PTR_MethodDesc(TO_TADDR(methodDesc)); if (!DacValidateMD(pMD)) { hr = E_INVALIDARG; } else { ZeroMemory(methodDescData, sizeof(DacpMethodDescData)); if (rgRevertedRejitData != NULL) ZeroMemory(rgRevertedRejitData, sizeof(*rgRevertedRejitData) * cRevertedRejitVersions); if (pcNeededRevertedRejitData != NULL) *pcNeededRevertedRejitData = 0; NativeCodeVersion requestedNativeCodeVersion, activeNativeCodeVersion; if (ip != NULL) { requestedNativeCodeVersion = ExecutionManager::GetNativeCodeVersion(CLRDATA_ADDRESS_TO_TADDR(ip)); } else { #ifdef FEATURE_CODE_VERSIONING activeNativeCodeVersion = pMD->GetCodeVersionManager()->GetActiveILCodeVersion(pMD).GetActiveNativeCodeVersion(pMD); #else activeNativeCodeVersion = NativeCodeVersion(pMD); #endif requestedNativeCodeVersion = activeNativeCodeVersion; } methodDescData->requestedIP = ip; methodDescData->bIsDynamic = (pMD->IsLCGMethod()) ? TRUE : FALSE; methodDescData->wSlotNumber = pMD->GetSlot(); if (!requestedNativeCodeVersion.IsNull() && requestedNativeCodeVersion.GetNativeCode() != NULL) { methodDescData->bHasNativeCode = TRUE; methodDescData->NativeCodeAddr = TO_CDADDR(PCODEToPINSTR(requestedNativeCodeVersion.GetNativeCode())); } else { methodDescData->bHasNativeCode = FALSE; methodDescData->NativeCodeAddr = (CLRDATA_ADDRESS)-1; } methodDescData->AddressOfNativeCodeSlot = pMD->HasNativeCodeSlot() ? TO_CDADDR(dac_cast<TADDR>(pMD->GetAddrOfNativeCodeSlot())) : NULL; methodDescData->MDToken = pMD->GetMemberDef(); methodDescData->MethodDescPtr = methodDesc; methodDescData->MethodTablePtr = HOST_CDADDR(pMD->GetMethodTable()); methodDescData->ModulePtr = HOST_CDADDR(pMD->GetModule()); #ifdef FEATURE_REJIT // If rejit info is appropriate, get the following: // * ReJitInfo for the current, active version of the method // * ReJitInfo for the requested IP (for !ip2md and !u) // * ReJitInfos for all reverted versions of the method (up to // cRevertedRejitVersions) // // Minidumps will not have all this rejit info, and failure to get rejit info // should not be fatal. So enclose all rejit stuff in a try. 
EX_TRY { CodeVersionManager *pCodeVersionManager = pMD->GetCodeVersionManager(); // Current ReJitInfo if (activeNativeCodeVersion.IsNull()) { ILCodeVersion activeILCodeVersion = pCodeVersionManager->GetActiveILCodeVersion(pMD); activeNativeCodeVersion = activeILCodeVersion.GetActiveNativeCodeVersion(pMD); } CopyNativeCodeVersionToReJitData( activeNativeCodeVersion, activeNativeCodeVersion, &methodDescData->rejitDataCurrent); // Requested ReJitInfo _ASSERTE(methodDescData->rejitDataRequested.rejitID == 0); if (ip != NULL && !requestedNativeCodeVersion.IsNull()) { CopyNativeCodeVersionToReJitData( requestedNativeCodeVersion, activeNativeCodeVersion, &methodDescData->rejitDataRequested); } // Total number of jitted rejit versions ULONG cJittedRejitVersions; if (SUCCEEDED(ReJitManager::GetReJITIDs(pMD, 0 /* cReJitIds */, &cJittedRejitVersions, NULL /* reJitIds */))) { methodDescData->cJittedRejitVersions = cJittedRejitVersions; } // Reverted ReJitInfos if (rgRevertedRejitData == NULL) { // No reverted rejit versions will be returned, but maybe caller wants a // count of all versions if (pcNeededRevertedRejitData != NULL) { *pcNeededRevertedRejitData = methodDescData->cJittedRejitVersions; } } else { // Caller wants some reverted rejit versions. Gather reverted rejit version data to return ULONG cReJitIds; StackSArray<ReJITID> reJitIds; // Prepare array to populate with rejitids. "+ 1" because GetReJITIDs // returns all available rejitids, including the rejitid for the one non-reverted // current version. ReJITID *rgReJitIds = reJitIds.OpenRawBuffer(cRevertedRejitVersions + 1); if (rgReJitIds != NULL) { hr = ReJitManager::GetReJITIDs(pMD, cRevertedRejitVersions + 1, &cReJitIds, rgReJitIds); if (SUCCEEDED(hr)) { // Go through rejitids. For each reverted one, populate an entry in rgRevertedRejitData reJitIds.CloseRawBuffer(cReJitIds); ULONG iRejitDataReverted = 0; ILCodeVersion activeVersion = pCodeVersionManager->GetActiveILCodeVersion(pMD); for (COUNT_T i = 0; (i < cReJitIds) && (iRejitDataReverted < cRevertedRejitVersions); i++) { ILCodeVersion ilCodeVersion = pCodeVersionManager->GetILCodeVersion(pMD, reJitIds[i]); if ((ilCodeVersion.IsNull()) || (ilCodeVersion == activeVersion)) { continue; } NativeCodeVersion activeRejitChild = ilCodeVersion.GetActiveNativeCodeVersion(pMD); CopyNativeCodeVersionToReJitData( activeRejitChild, activeNativeCodeVersion, &rgRevertedRejitData[iRejitDataReverted]); iRejitDataReverted++; } // pcNeededRevertedRejitData != NULL as per condition at top of function (because rgRevertedRejitData != // NULL). *pcNeededRevertedRejitData = iRejitDataReverted; } } } } EX_CATCH { if (pcNeededRevertedRejitData != NULL) *pcNeededRevertedRejitData = 0; } EX_END_CATCH(SwallowAllExceptions) hr = S_OK; // Failure to get rejitids is not fatal #endif // FEATURE_REJIT #ifdef HAVE_GCCOVER if (!requestedNativeCodeVersion.IsNull()) { PTR_GCCoverageInfo gcCover = requestedNativeCodeVersion.GetGCCoverageInfo(); if (gcCover != NULL) { // In certain minidumps, we won't save the gccover information. // (it would be unwise to do so, it is heavy and not a customer scenario).
methodDescData->GCStressCodeCopy = HOST_CDADDR(gcCover) + offsetof(GCCoverageInfo, savedCode); } } #endif // HAVE_GCCOVER // bIsDynamic was set above, so we can now fetch the managed DynamicMethod object for dynamic (LCG) methods if (methodDescData->bIsDynamic) { DynamicMethodDesc *pDynamicMethod = PTR_DynamicMethodDesc(TO_TADDR(methodDesc)); if (pDynamicMethod) { LCGMethodResolver *pResolver = pDynamicMethod->GetLCGMethodResolver(); if (pResolver) { OBJECTREF value = pResolver->GetManagedResolver(); if (value) { FieldDesc *pField = (&g_CoreLib)->GetField(FIELD__DYNAMICRESOLVER__DYNAMIC_METHOD); _ASSERTE(pField); value = pField->GetRefValue(value); if (value) { methodDescData->managedDynamicMethodObject = PTR_HOST_TO_TADDR(value); } } } } } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetTieredVersions( CLRDATA_ADDRESS methodDesc, int rejitId, struct DacpTieredVersionData *nativeCodeAddrs, int cNativeCodeAddrs, int *pcNativeCodeAddrs) { if (methodDesc == 0 || cNativeCodeAddrs == 0 || pcNativeCodeAddrs == NULL) { return E_INVALIDARG; } *pcNativeCodeAddrs = 0; SOSDacEnter(); #ifdef FEATURE_REJIT PTR_MethodDesc pMD = PTR_MethodDesc(TO_TADDR(methodDesc)); // If rejit info is appropriate, get the following: // * ReJitInfo for the current, active version of the method // * ReJitInfo for the requested IP (for !ip2md and !u) // * ReJitInfos for all reverted versions of the method (up to // cRevertedRejitVersions) // // Minidumps will not have all this rejit info, and failure to get rejit info // should not be fatal. So enclose all rejit stuff in a try. EX_TRY { CodeVersionManager *pCodeVersionManager = pMD->GetCodeVersionManager(); ILCodeVersion ilCodeVersion = pCodeVersionManager->GetILCodeVersion(pMD, rejitId); if (ilCodeVersion.IsNull()) { // Bad rejit ID hr = E_INVALIDARG; goto cleanup; } TADDR r2rImageBase = NULL; TADDR r2rImageEnd = NULL; { PTR_Module pModule = (PTR_Module)pMD->GetModule(); if (pModule->IsReadyToRun()) { PTR_PEImageLayout pImage = pModule->GetReadyToRunInfo()->GetImage(); r2rImageBase = dac_cast<TADDR>(pImage->GetBase()); r2rImageEnd = r2rImageBase + pImage->GetSize(); } } NativeCodeVersionCollection nativeCodeVersions = ilCodeVersion.GetNativeCodeVersions(pMD); int count = 0; for (NativeCodeVersionIterator iter = nativeCodeVersions.Begin(); iter != nativeCodeVersions.End(); iter++) { TADDR pNativeCode = PCODEToPINSTR((*iter).GetNativeCode()); nativeCodeAddrs[count].NativeCodeAddr = pNativeCode; PTR_NativeCodeVersionNode pNode = (*iter).AsNode(); nativeCodeAddrs[count].NativeCodeVersionNodePtr = TO_CDADDR(PTR_TO_TADDR(pNode)); if (r2rImageBase <= pNativeCode && pNativeCode < r2rImageEnd) { nativeCodeAddrs[count].OptimizationTier = DacpTieredVersionData::OptimizationTier_ReadyToRun; } else if (pMD->IsEligibleForTieredCompilation()) { switch ((*iter).GetOptimizationTier()) { default: nativeCodeAddrs[count].OptimizationTier = DacpTieredVersionData::OptimizationTier_Unknown; break; case NativeCodeVersion::OptimizationTier0: nativeCodeAddrs[count].OptimizationTier = DacpTieredVersionData::OptimizationTier_QuickJitted; break; case NativeCodeVersion::OptimizationTier1: nativeCodeAddrs[count].OptimizationTier = DacpTieredVersionData::OptimizationTier_OptimizedTier1; break; case NativeCodeVersion::OptimizationTier1OSR: nativeCodeAddrs[count].OptimizationTier = DacpTieredVersionData::OptimizationTier_OptimizedTier1OSR; break; case NativeCodeVersion::OptimizationTierOptimized: nativeCodeAddrs[count].OptimizationTier = DacpTieredVersionData::OptimizationTier_Optimized; break; } } else if (pMD->IsJitOptimizationDisabled()) {
nativeCodeAddrs[count].OptimizationTier = DacpTieredVersionData::OptimizationTier_MinOptJitted; } else { nativeCodeAddrs[count].OptimizationTier = DacpTieredVersionData::OptimizationTier_Optimized; } ++count; if (count >= cNativeCodeAddrs) { hr = S_FALSE; break; } } *pcNativeCodeAddrs = count; } EX_CATCH { hr = E_FAIL; } EX_END_CATCH(SwallowAllExceptions) cleanup: ; #endif // FEATURE_REJIT SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetMethodDescTransparencyData(CLRDATA_ADDRESS methodDesc, struct DacpMethodDescTransparencyData *data) { if (methodDesc == 0 || data == NULL) return E_INVALIDARG; SOSDacEnter(); MethodDesc *pMD = PTR_MethodDesc(TO_TADDR(methodDesc)); if (!DacValidateMD(pMD)) { hr = E_INVALIDARG; } else { ZeroMemory(data, sizeof(DacpMethodDescTransparencyData)); } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetCodeHeaderData(CLRDATA_ADDRESS ip, struct DacpCodeHeaderData *codeHeaderData) { if (ip == 0 || codeHeaderData == NULL) return E_INVALIDARG; SOSDacEnter(); EECodeInfo codeInfo(TO_TADDR(ip)); if (!codeInfo.IsValid()) { // We may be able to walk stubs to find a method desc if it's not a jitted method. MethodDesc *methodDescI = MethodTable::GetMethodDescForSlotAddress(TO_TADDR(ip)); if (methodDescI == NULL) { hr = E_INVALIDARG; } else { codeHeaderData->MethodDescPtr = HOST_CDADDR(methodDescI); codeHeaderData->JITType = TYPE_UNKNOWN; codeHeaderData->GCInfo = NULL; codeHeaderData->MethodStart = NULL; codeHeaderData->MethodSize = 0; codeHeaderData->ColdRegionStart = NULL; } } else { codeHeaderData->MethodDescPtr = HOST_CDADDR(codeInfo.GetMethodDesc()); GetJITMethodInfo(&codeInfo, &codeHeaderData->JITType, &codeHeaderData->GCInfo); codeHeaderData->MethodStart = (CLRDATA_ADDRESS) codeInfo.GetStartAddress(); size_t methodSize = codeInfo.GetCodeManager()->GetFunctionSize(codeInfo.GetGCInfoToken()); _ASSERTE(FitsIn<DWORD>(methodSize)); codeHeaderData->MethodSize = static_cast<DWORD>(methodSize); IJitManager::MethodRegionInfo methodRegionInfo = {NULL, 0, NULL, 0}; codeInfo.GetMethodRegionInfo(&methodRegionInfo); codeHeaderData->HotRegionSize = (DWORD) methodRegionInfo.hotSize; codeHeaderData->ColdRegionSize = (DWORD) methodRegionInfo.coldSize; codeHeaderData->ColdRegionStart = (CLRDATA_ADDRESS) methodRegionInfo.coldStartAddress; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetMethodDescPtrFromFrame(CLRDATA_ADDRESS frameAddr, CLRDATA_ADDRESS * ppMD) { if (frameAddr == 0 || ppMD == NULL) return E_INVALIDARG; SOSDacEnter(); Frame *pFrame = PTR_Frame(TO_TADDR(frameAddr)); CLRDATA_ADDRESS methodDescAddr = HOST_CDADDR(pFrame->GetFunction()); if ((methodDescAddr == NULL) || !DacValidateMD(PTR_MethodDesc(TO_TADDR(methodDescAddr)))) { hr = E_INVALIDARG; } else { *ppMD = methodDescAddr; hr = S_OK; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetMethodDescPtrFromIP(CLRDATA_ADDRESS ip, CLRDATA_ADDRESS * ppMD) { if (ip == 0 || ppMD == NULL) return E_INVALIDARG; SOSDacEnter(); EECodeInfo codeInfo(TO_TADDR(ip)); if (!codeInfo.IsValid()) { hr = E_FAIL; } else { CLRDATA_ADDRESS pMD = HOST_CDADDR(codeInfo.GetMethodDesc()); if ((pMD == NULL) || !DacValidateMD(PTR_MethodDesc(TO_TADDR(pMD)))) { hr = E_INVALIDARG; } else { *ppMD = pMD; hr = S_OK; } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetMethodDescName(CLRDATA_ADDRESS methodDesc, unsigned int count, _Inout_updates_z_(count) WCHAR *name, unsigned int *pNeeded) { if (methodDesc == 0) return E_INVALIDARG; SOSDacEnter(); MethodDesc* pMD = PTR_MethodDesc(TO_TADDR(methodDesc)); StackSString str; EX_TRY { 
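// First try the full formatting: namespace, signature and full generic instantiation.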
TypeString::AppendMethodInternal(str, pMD, TypeString::FormatSignature|TypeString::FormatNamespace|TypeString::FormatFullInst); } EX_CATCH { hr = E_FAIL; if (pMD->IsDynamicMethod()) { if (pMD->IsLCGMethod() || pMD->IsILStub()) { // In heap dumps, trying to format the signature can fail // in certain cases. str.Clear(); TypeString::AppendMethodInternal(str, pMD, TypeString::FormatNamespace|TypeString::FormatFullInst); hr = S_OK; } } else { #ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS if (MdCacheGetEEName(TO_TADDR(methodDesc), str)) { hr = S_OK; } else { #endif // FEATURE_MINIMETADATA_IN_TRIAGEDUMPS str.Clear(); Module* pModule = pMD->GetModule(); if (pModule) { WCHAR path[MAX_LONGPATH]; COUNT_T nChars = 0; if (pModule->GetPath().DacGetUnicode(ARRAY_SIZE(path), path, &nChars) && nChars > 0 && nChars <= ARRAY_SIZE(path)) { WCHAR* pFile = path + nChars - 1; while ((pFile >= path) && (*pFile != W('\\'))) { pFile--; } pFile++; if (*pFile) { str.Append(pFile); str.Append(W("!Unknown")); hr = S_OK; } } } #ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS } #endif } } EX_END_CATCH(SwallowAllExceptions) if (SUCCEEDED(hr)) { const WCHAR *val = str.GetUnicode(); if (pNeeded) *pNeeded = str.GetCount() + 1; if (name && count) { wcsncpy_s(name, count, val, _TRUNCATE); name[count-1] = 0; } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetDomainFromContext(CLRDATA_ADDRESS contextAddr, CLRDATA_ADDRESS *domain) { if (contextAddr == 0 || domain == NULL) return E_INVALIDARG; SOSDacEnter(); *domain = contextAddr; // Context is same as the AppDomain in CoreCLR SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetObjectStringData(CLRDATA_ADDRESS obj, unsigned int count, _Inout_updates_z_(count) WCHAR *stringData, unsigned int *pNeeded) { if (obj == 0) return E_INVALIDARG; if ((stringData == 0 || count <= 0) && (pNeeded == NULL)) return E_INVALIDARG; SOSDacEnter(); TADDR mtTADDR = DACGetMethodTableFromObjectPointer(TO_TADDR(obj), m_pTarget); MethodTable *mt = PTR_MethodTable(mtTADDR); // Object must be a string BOOL bFree = FALSE; if (!DacValidateMethodTable(mt, bFree)) hr = E_INVALIDARG; else if (HOST_CDADDR(mt) != HOST_CDADDR(g_pStringClass)) hr = E_INVALIDARG; if (SUCCEEDED(hr)) { PTR_StringObject str(TO_TADDR(obj)); ULONG32 needed = (ULONG32)str->GetStringLength() + 1; if (stringData && count > 0) { if (count > needed) count = needed; TADDR pszStr = TO_TADDR(obj)+offsetof(StringObject, m_FirstChar); hr = m_pTarget->ReadVirtual(pszStr, (PBYTE)stringData, count * sizeof(WCHAR), &needed); if (SUCCEEDED(hr)) stringData[count - 1] = W('\0'); else stringData[0] = W('\0'); } else { hr = E_INVALIDARG; } if (pNeeded) *pNeeded = needed; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetObjectClassName(CLRDATA_ADDRESS obj, unsigned int count, _Inout_updates_z_(count) WCHAR *className, unsigned int *pNeeded) { if (obj == 0) return E_INVALIDARG; SOSDacEnter(); // Don't turn the Object into a pointer, it is too costly on // scans of the gc heap. MethodTable *mt = NULL; TADDR mtTADDR = DACGetMethodTableFromObjectPointer(CLRDATA_ADDRESS_TO_TADDR(obj), m_pTarget); if (mtTADDR != NULL) mt = PTR_MethodTable(mtTADDR); else hr = E_INVALIDARG; BOOL bFree = FALSE; if (SUCCEEDED(hr) && !DacValidateMethodTable(mt, bFree)) hr = E_INVALIDARG; if (SUCCEEDED(hr)) { // There is a case where metadata was unloaded and the AppendType call will fail. // This is when an AppDomain has been unloaded but not yet collected. 
PEAssembly *pPEAssembly = mt->GetModule()->GetPEAssembly(); if (pPEAssembly->GetPEImage() == NULL) { if (pNeeded) *pNeeded = 16; if (className) wcsncpy_s(className, count, W("<Unloaded Type>"), _TRUNCATE); } else { StackSString s; TypeString::AppendType(s, TypeHandle(mt), TypeString::FormatNamespace|TypeString::FormatFullInst); const WCHAR *val = s.GetUnicode(); if (pNeeded) *pNeeded = s.GetCount() + 1; if (className && count) { wcsncpy_s(className, count, val, _TRUNCATE); className[count-1] = 0; } } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetMethodDescFromToken(CLRDATA_ADDRESS moduleAddr, mdToken token, CLRDATA_ADDRESS *methodDesc) { if (moduleAddr == 0 || methodDesc == NULL) return E_INVALIDARG; SOSDacEnter(); Module* pModule = PTR_Module(TO_TADDR(moduleAddr)); TypeHandle th; switch (TypeFromToken(token)) { case mdtFieldDef: *methodDesc = HOST_CDADDR(pModule->LookupFieldDef(token)); break; case mdtMethodDef: *methodDesc = HOST_CDADDR(pModule->LookupMethodDef(token)); break; case mdtTypeDef: th = pModule->LookupTypeDef(token); *methodDesc = th.AsTAddr(); break; case mdtTypeRef: th = pModule->LookupTypeRef(token); *methodDesc = th.AsTAddr(); break; default: hr = E_INVALIDARG; break; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::TraverseModuleMap(ModuleMapType mmt, CLRDATA_ADDRESS moduleAddr, MODULEMAPTRAVERSE pCallback, LPVOID token) { if (moduleAddr == 0) return E_INVALIDARG; SOSDacEnter(); Module* pModule = PTR_Module(TO_TADDR(moduleAddr)); // We want to traverse these two tables, passing callback information switch (mmt) { case TYPEDEFTOMETHODTABLE: { LookupMap<PTR_MethodTable>::Iterator typeIter(&pModule->m_TypeDefToMethodTableMap); for (int i = 0; typeIter.Next(); i++) { if (typeIter.GetElement()) { MethodTable* pMT = typeIter.GetElement(); (pCallback)(i,PTR_HOST_TO_TADDR(pMT), token); } } } break; case TYPEREFTOMETHODTABLE: { LookupMap<PTR_TypeRef>::Iterator typeIter(&pModule->m_TypeRefToMethodTableMap); for (int i = 0; typeIter.Next(); i++) { if (typeIter.GetElement()) { MethodTable* pMT = TypeHandle::FromTAddr(dac_cast<TADDR>(typeIter.GetElement())).GetMethodTable(); (pCallback)(i,PTR_HOST_TO_TADDR(pMT), token); } } } break; default: hr = E_INVALIDARG; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetModule(CLRDATA_ADDRESS addr, IXCLRDataModule **mod) { if (addr == 0 || mod == NULL) return E_INVALIDARG; SOSDacEnter(); Module* pModule = PTR_Module(TO_TADDR(addr)); *mod = new ClrDataModule(this, pModule); SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetModuleData(CLRDATA_ADDRESS addr, struct DacpModuleData *ModuleData) { if (addr == 0 || ModuleData == NULL) return E_INVALIDARG; SOSDacEnter(); Module* pModule = PTR_Module(TO_TADDR(addr)); ZeroMemory(ModuleData,sizeof(DacpModuleData)); ModuleData->Address = addr; ModuleData->PEAssembly = HOST_CDADDR(pModule->GetPEAssembly()); COUNT_T metadataSize = 0; if (!pModule->GetPEAssembly()->IsDynamic()) { ModuleData->ilBase = (CLRDATA_ADDRESS)(ULONG_PTR) pModule->GetPEAssembly()->GetIJWBase(); } ModuleData->metadataStart = (CLRDATA_ADDRESS)dac_cast<TADDR>(pModule->GetPEAssembly()->GetLoadedMetadata(&metadataSize)); ModuleData->metadataSize = (SIZE_T) metadataSize; ModuleData->bIsReflection = pModule->IsReflection(); ModuleData->bIsPEFile = pModule->IsPEFile(); ModuleData->Assembly = HOST_CDADDR(pModule->GetAssembly()); ModuleData->dwModuleID = pModule->GetModuleID(); ModuleData->dwModuleIndex = pModule->GetModuleIndex().m_dwIndex; ModuleData->dwTransientFlags = pModule->m_dwTransientFlags; EX_TRY { // // In 
minidump's case, these data structures are not available. // ModuleData->TypeDefToMethodTableMap = PTR_CDADDR(pModule->m_TypeDefToMethodTableMap.pTable); ModuleData->TypeRefToMethodTableMap = PTR_CDADDR(pModule->m_TypeRefToMethodTableMap.pTable); ModuleData->MethodDefToDescMap = PTR_CDADDR(pModule->m_MethodDefToDescMap.pTable); ModuleData->FieldDefToDescMap = PTR_CDADDR(pModule->m_FieldDefToDescMap.pTable); ModuleData->MemberRefToDescMap = NULL; ModuleData->FileReferencesMap = PTR_CDADDR(pModule->m_FileReferencesMap.pTable); ModuleData->ManifestModuleReferencesMap = PTR_CDADDR(pModule->m_ManifestModuleReferencesMap.pTable); } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions) SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetILForModule(CLRDATA_ADDRESS moduleAddr, DWORD rva, CLRDATA_ADDRESS *il) { if (moduleAddr == 0 || il == NULL) return E_INVALIDARG; SOSDacEnter(); Module* pModule = PTR_Module(TO_TADDR(moduleAddr)); *il = (TADDR)(CLRDATA_ADDRESS)pModule->GetIL(rva); SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetMethodTableData(CLRDATA_ADDRESS mt, struct DacpMethodTableData *MTData) { if (mt == 0 || MTData == NULL) return E_INVALIDARG; SOSDacEnter(); MethodTable* pMT = PTR_MethodTable(TO_TADDR(mt)); BOOL bIsFree = FALSE; if (!DacValidateMethodTable(pMT, bIsFree)) { hr = E_INVALIDARG; } else { ZeroMemory(MTData,sizeof(DacpMethodTableData)); MTData->BaseSize = pMT->GetBaseSize(); if(pMT->IsString()) MTData->BaseSize -= sizeof(WCHAR); MTData->ComponentSize = (DWORD)pMT->GetComponentSize(); MTData->bIsFree = bIsFree; if(!bIsFree) { MTData->Module = HOST_CDADDR(pMT->GetModule()); MTData->Class = HOST_CDADDR(pMT->GetClass()); MTData->ParentMethodTable = HOST_CDADDR(pMT->GetParentMethodTable()); MTData->wNumInterfaces = pMT->GetNumInterfaces(); MTData->wNumMethods = pMT->GetNumMethods(); MTData->wNumVtableSlots = pMT->GetNumVtableSlots(); MTData->wNumVirtuals = pMT->GetNumVirtuals(); MTData->cl = pMT->GetCl(); MTData->dwAttrClass = pMT->GetAttrClass(); MTData->bContainsPointers = pMT->ContainsPointers(); MTData->bIsShared = FALSE; MTData->bIsDynamic = pMT->IsDynamicStatics(); } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetMethodTableName(CLRDATA_ADDRESS mt, unsigned int count, _Inout_updates_z_(count) WCHAR *mtName, unsigned int *pNeeded) { if (mt == 0) return E_INVALIDARG; SOSDacEnter(); MethodTable *pMT = PTR_MethodTable(TO_TADDR(mt)); BOOL free = FALSE; if (mt == HOST_CDADDR(g_pFreeObjectMethodTable)) { if (pNeeded) *pNeeded = 5; if (mtName && count) wcsncpy_s(mtName, count, W("Free"), _TRUNCATE); } else if (!DacValidateMethodTable(pMT, free)) { hr = E_INVALIDARG; } else { // There is a case where metadata was unloaded and the AppendType call will fail. // This is when an AppDomain has been unloaded but not yet collected.
PEAssembly *pPEAssembly = pMT->GetModule()->GetPEAssembly(); if (pPEAssembly->GetPEImage() == NULL) { if (pNeeded) *pNeeded = 16; if (mtName) wcsncpy_s(mtName, count, W("<Unloaded Type>"), _TRUNCATE); } else { StackSString s; #ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS EX_TRY { #endif // FEATURE_MINIMETADATA_IN_TRIAGEDUMPS TypeString::AppendType(s, TypeHandle(pMT), TypeString::FormatNamespace|TypeString::FormatFullInst); #ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS } EX_CATCH { if (!MdCacheGetEEName(dac_cast<TADDR>(pMT), s)) { EX_RETHROW; } } EX_END_CATCH(SwallowAllExceptions) #endif // FEATURE_MINIMETADATA_IN_TRIAGEDUMPS if (s.IsEmpty()) { hr = E_OUTOFMEMORY; } else { const WCHAR *val = s.GetUnicode(); if (pNeeded) *pNeeded = s.GetCount() + 1; if (mtName && count) { wcsncpy_s(mtName, count, val, _TRUNCATE); mtName[count-1] = 0; } } } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetFieldDescData(CLRDATA_ADDRESS addr, struct DacpFieldDescData *FieldDescData) { if (addr == 0 || FieldDescData == NULL) return E_INVALIDARG; SOSDacEnter(); FieldDesc* pFieldDesc = PTR_FieldDesc(TO_TADDR(addr)); FieldDescData->Type = pFieldDesc->GetFieldType(); FieldDescData->sigType = FieldDescData->Type; EX_TRY { // In the minidump case we do not have the type handle for the field's type. // Strike should be able to form the name from the metadata token in the // field desc. Looking up the type uses a lookup map, which is huge; we cannot // drag that data structure in for the minidump case. // TypeHandle th = pFieldDesc->LookupFieldTypeHandle(); MethodTable *pMt = th.GetMethodTable(); if (pMt) { FieldDescData->MTOfType = HOST_CDADDR(th.GetMethodTable()); } else { FieldDescData->MTOfType = NULL; } } EX_CATCH { FieldDescData->MTOfType = NULL; } EX_END_CATCH(SwallowAllExceptions) // TODO: This is not currently useful, I need to get the module of the // type definition, not that of the field description. // TODO: Is there an easier way to get this information? // I'm getting the typeDef of a (possibly unloaded) type. MetaSig tSig(pFieldDesc); tSig.NextArg(); SigPointer sp1 = tSig.GetArgProps(); CorElementType et; hr = sp1.GetElemType(&et); // throw away the value, we just need to walk past. if (SUCCEEDED(hr)) { if (et == ELEMENT_TYPE_CLASS || et == ELEMENT_TYPE_VALUETYPE) // are there any other element types followed by a token?
{ hr = sp1.GetToken(&(FieldDescData->TokenOfType)); } else { // There is no encoded token of field type FieldDescData->TokenOfType = mdTypeDefNil; if (FieldDescData->MTOfType == NULL) { // If there is no encoded token (that is, it is primitive type) and no MethodTable for it, remember the // element_type from signature // FieldDescData->sigType = et; } } } FieldDescData->ModuleOfType = HOST_CDADDR(pFieldDesc->GetModule()); FieldDescData->mb = pFieldDesc->GetMemberDef(); FieldDescData->MTOfEnclosingClass = HOST_CDADDR(pFieldDesc->GetApproxEnclosingMethodTable()); FieldDescData->dwOffset = pFieldDesc->GetOffset(); FieldDescData->bIsThreadLocal = pFieldDesc->IsThreadStatic(); FieldDescData->bIsContextLocal = FALSE; FieldDescData->bIsStatic = pFieldDesc->IsStatic(); FieldDescData->NextField = HOST_CDADDR(PTR_FieldDesc(PTR_HOST_TO_TADDR(pFieldDesc) + sizeof(FieldDesc))); SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetMethodTableFieldData(CLRDATA_ADDRESS mt, struct DacpMethodTableFieldData *data) { if (mt == 0 || data == NULL) return E_INVALIDARG; SOSDacEnter(); MethodTable* pMT = PTR_MethodTable(TO_TADDR(mt)); BOOL bIsFree = FALSE; if (!pMT || !DacValidateMethodTable(pMT, bIsFree)) { hr = E_INVALIDARG; } else { data->wNumInstanceFields = pMT->GetNumInstanceFields(); data->wNumStaticFields = pMT->GetNumStaticFields(); data->wNumThreadStaticFields = pMT->GetNumThreadStaticFields(); data->FirstField = PTR_TO_TADDR(pMT->GetClass()->GetFieldDescList()); data->wContextStaticsSize = 0; data->wContextStaticOffset = 0; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetMethodTableCollectibleData(CLRDATA_ADDRESS mt, struct DacpMethodTableCollectibleData *data) { if (mt == 0 || data == NULL) return E_INVALIDARG; SOSDacEnter(); MethodTable* pMT = PTR_MethodTable(TO_TADDR(mt)); BOOL bIsFree = FALSE; if (!pMT || !DacValidateMethodTable(pMT, bIsFree)) { hr = E_INVALIDARG; } else { data->bCollectible = pMT->Collectible(); if (data->bCollectible) { data->LoaderAllocatorObjectHandle = pMT->GetLoaderAllocatorObjectHandle(); } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetMethodTableTransparencyData(CLRDATA_ADDRESS mt, struct DacpMethodTableTransparencyData *pTransparencyData) { if (mt == 0 || pTransparencyData == NULL) return E_INVALIDARG; SOSDacEnter(); MethodTable *pMT = PTR_MethodTable(TO_TADDR(mt)); BOOL bIsFree = FALSE; if (!DacValidateMethodTable(pMT, bIsFree)) { hr = E_INVALIDARG; } else { ZeroMemory(pTransparencyData, sizeof(DacpMethodTableTransparencyData)); } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetMethodTableForEEClass(CLRDATA_ADDRESS eeClass, CLRDATA_ADDRESS *value) { if (eeClass == 0 || value == NULL) return E_INVALIDARG; SOSDacEnter(); EEClass * pClass = PTR_EEClass(TO_TADDR(eeClass)); if (!DacValidateEEClass(pClass)) { hr = E_INVALIDARG; } else { *value = HOST_CDADDR(pClass->GetMethodTable()); } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetFrameName(CLRDATA_ADDRESS vtable, unsigned int count, _Inout_updates_z_(count) WCHAR *frameName, unsigned int *pNeeded) { if (vtable == 0) return E_INVALIDARG; SOSDacEnter(); PWSTR pszName = DacGetVtNameW(CLRDATA_ADDRESS_TO_TADDR(vtable)); if (pszName == NULL) { hr = E_INVALIDARG; } else { // Turn from bytes to wide characters unsigned int len = (unsigned int)wcslen(pszName); if (frameName) { wcsncpy_s(frameName, count, pszName, _TRUNCATE); if (pNeeded) { if (count < len) *pNeeded = count - 1; else *pNeeded = len; } } else if (pNeeded) { *pNeeded = len + 1; } } SOSDacLeave(); return hr; } HRESULT 
ClrDataAccess::GetPEFileName(CLRDATA_ADDRESS addr, unsigned int count, _Inout_updates_z_(count) WCHAR *fileName, unsigned int *pNeeded) { if (addr == 0 || (fileName == NULL && pNeeded == NULL) || (fileName != NULL && count == 0)) return E_INVALIDARG; SOSDacEnter(); PEAssembly* pPEAssembly = PTR_PEAssembly(TO_TADDR(addr)); // Turn from bytes to wide characters if (!pPEAssembly->GetPath().IsEmpty()) { if (!pPEAssembly->GetPath().DacGetUnicode(count, fileName, pNeeded)) hr = E_FAIL; } else if (!pPEAssembly->IsDynamic()) { StackSString displayName; pPEAssembly->GetDisplayName(displayName, 0); if (displayName.IsEmpty()) { if (fileName) fileName[0] = 0; if (pNeeded) *pNeeded = 1; } else { unsigned int len = displayName.GetCount()+1; if (fileName) { wcsncpy_s(fileName, count, displayName.GetUnicode(), _TRUNCATE); if (count < len) len = count; } if (pNeeded) *pNeeded = len; } } else { if (fileName && count) fileName[0] = 0; if (pNeeded) *pNeeded = 1; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetPEFileBase(CLRDATA_ADDRESS addr, CLRDATA_ADDRESS *base) { if (addr == 0 || base == NULL) return E_INVALIDARG; SOSDacEnter(); PEAssembly* pPEAssembly = PTR_PEAssembly(TO_TADDR(addr)); // More fields later? if (!pPEAssembly->IsDynamic()) *base = TO_CDADDR(pPEAssembly->GetIJWBase()); else *base = NULL; SOSDacLeave(); return hr; } DWORD DACGetNumComponents(TADDR addr, ICorDebugDataTarget* target) { // For an object pointer, this attempts to read the number of // array components. addr+=sizeof(size_t); ULONG32 returned = 0; DWORD Value = NULL; HRESULT hr = target->ReadVirtual(addr, (PBYTE)&Value, sizeof(DWORD), &returned); if ((hr != S_OK) || (returned != sizeof(DWORD))) { return 0; } return Value; } HRESULT ClrDataAccess::GetObjectData(CLRDATA_ADDRESS addr, struct DacpObjectData *objectData) { if (addr == 0 || objectData == NULL) return E_INVALIDARG; SOSDacEnter(); ZeroMemory (objectData, sizeof(DacpObjectData)); TADDR mtTADDR = DACGetMethodTableFromObjectPointer(CLRDATA_ADDRESS_TO_TADDR(addr),m_pTarget); if (mtTADDR==NULL) hr = E_INVALIDARG; BOOL bFree = FALSE; MethodTable *mt = NULL; if (SUCCEEDED(hr)) { mt = PTR_MethodTable(mtTADDR); if (!DacValidateMethodTable(mt, bFree)) hr = E_INVALIDARG; } if (SUCCEEDED(hr)) { objectData->MethodTable = HOST_CDADDR(mt); objectData->Size = mt->GetBaseSize(); if (mt->GetComponentSize()) { objectData->Size += (DACGetNumComponents(CLRDATA_ADDRESS_TO_TADDR(addr),m_pTarget) * mt->GetComponentSize()); objectData->dwComponentSize = mt->GetComponentSize(); } if (bFree) { objectData->ObjectType = OBJ_FREE; } else { if (objectData->MethodTable == HOST_CDADDR(g_pStringClass)) { objectData->ObjectType = OBJ_STRING; } else if (objectData->MethodTable == HOST_CDADDR(g_pObjectClass)) { objectData->ObjectType = OBJ_OBJECT; } else if (mt->IsArray()) { objectData->ObjectType = OBJ_ARRAY; // For now, go ahead and instantiate array classes. // TODO: avoid instantiating even object Arrays in the host. // NOTE: This code is carefully written to deal with MethodTable fields // in the array object having the mark bit set (because we may // be in mark phase when this function is called). 
ArrayBase *pArrayObj = PTR_ArrayBase(TO_TADDR(addr)); objectData->ElementType = mt->GetArrayElementType(); TypeHandle thElem = mt->GetArrayElementTypeHandle(); TypeHandle thCur = thElem; while (thCur.IsArray()) thCur = thCur.GetArrayElementTypeHandle(); TADDR mtCurTADDR = thCur.AsTAddr(); if (!DacValidateMethodTable(PTR_MethodTable(mtCurTADDR), bFree)) { hr = E_INVALIDARG; } else { objectData->ElementTypeHandle = (CLRDATA_ADDRESS)(thElem.AsTAddr()); objectData->dwRank = mt->GetRank(); objectData->dwNumComponents = pArrayObj->GetNumComponents (); objectData->ArrayDataPtr = PTR_CDADDR(pArrayObj->GetDataPtr (TRUE)); objectData->ArrayBoundsPtr = HOST_CDADDR(pArrayObj->GetBoundsPtr()); objectData->ArrayLowerBoundsPtr = HOST_CDADDR(pArrayObj->GetLowerBoundsPtr()); } } else { objectData->ObjectType = OBJ_OTHER; } } } #ifdef FEATURE_COMINTEROP if (SUCCEEDED(hr)) { EX_TRY_ALLOW_DATATARGET_MISSING_MEMORY { PTR_SyncBlock pSyncBlk = DACGetSyncBlockFromObjectPointer(CLRDATA_ADDRESS_TO_TADDR(addr), m_pTarget); if (pSyncBlk != NULL) { // see if we have an RCW and/or CCW associated with this object PTR_InteropSyncBlockInfo pInfo = pSyncBlk->GetInteropInfoNoCreate(); if (pInfo != NULL) { objectData->RCW = TO_CDADDR(pInfo->DacGetRawRCW()); objectData->CCW = HOST_CDADDR(pInfo->GetCCW()); } } } EX_END_CATCH_ALLOW_DATATARGET_MISSING_MEMORY; } #endif // FEATURE_COMINTEROP SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetAppDomainList(unsigned int count, CLRDATA_ADDRESS values[], unsigned int *fetched) { SOSDacEnter(); AppDomainIterator ai(FALSE); unsigned int i = 0; while (ai.Next() && (i < count)) { if (values) values[i] = HOST_CDADDR(ai.GetDomain()); i++; } if (fetched) *fetched = i; SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetAppDomainStoreData(struct DacpAppDomainStoreData *adsData) { SOSDacEnter(); adsData->systemDomain = HOST_CDADDR(SystemDomain::System()); adsData->sharedDomain = NULL; // Get an accurate count of appdomains. adsData->DomainCount = 0; AppDomainIterator ai(FALSE); while (ai.Next()) adsData->DomainCount++; SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetAppDomainData(CLRDATA_ADDRESS addr, struct DacpAppDomainData *appdomainData) { SOSDacEnter(); if (addr == 0) { hr = E_INVALIDARG; } else { PTR_BaseDomain pBaseDomain = PTR_BaseDomain(TO_TADDR(addr)); ZeroMemory(appdomainData, sizeof(DacpAppDomainData)); appdomainData->AppDomainPtr = PTR_CDADDR(pBaseDomain); PTR_LoaderAllocator pLoaderAllocator = pBaseDomain->GetLoaderAllocator(); appdomainData->pHighFrequencyHeap = HOST_CDADDR(pLoaderAllocator->GetHighFrequencyHeap()); appdomainData->pLowFrequencyHeap = HOST_CDADDR(pLoaderAllocator->GetLowFrequencyHeap()); appdomainData->pStubHeap = HOST_CDADDR(pLoaderAllocator->GetStubHeap()); appdomainData->appDomainStage = STAGE_OPEN; if (pBaseDomain->IsAppDomain()) { AppDomain * pAppDomain = pBaseDomain->AsAppDomain(); appdomainData->DomainLocalBlock = 0; appdomainData->pDomainLocalModules = 0; appdomainData->dwId = DefaultADID; appdomainData->appDomainStage = (DacpAppDomainDataStage)pAppDomain->m_Stage.Load(); if (pAppDomain->IsActive()) { // The assembly list is not valid in a closed appdomain. 
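// Count the loaded assemblies and the failed assembly loads for this domain.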
AppDomain::AssemblyIterator i = pAppDomain->IterateAssembliesEx((AssemblyIterationFlags)( kIncludeLoading | kIncludeLoaded | kIncludeExecution)); CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly; while (i.Next(pDomainAssembly.This())) { if (pDomainAssembly->IsLoaded()) { appdomainData->AssemblyCount++; } } AppDomain::FailedAssemblyIterator j = pAppDomain->IterateFailedAssembliesEx(); while (j.Next()) { appdomainData->FailedAssemblyCount++; } } } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetFailedAssemblyData(CLRDATA_ADDRESS assembly, unsigned int *pContext, HRESULT *pResult) { if (assembly == NULL || (pContext == NULL && pResult == NULL)) { return E_INVALIDARG; } SOSDacEnter(); FailedAssembly* pAssembly = PTR_FailedAssembly(TO_TADDR(assembly)); if (!pAssembly) { hr = E_INVALIDARG; } else { if (pResult) *pResult = pAssembly->error; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetFailedAssemblyLocation(CLRDATA_ADDRESS assembly, unsigned int count, _Inout_updates_z_(count) WCHAR *location, unsigned int *pNeeded) { if (assembly == NULL || (location == NULL && pNeeded == NULL) || (location != NULL && count == 0)) return E_INVALIDARG; SOSDacEnter(); FailedAssembly* pAssembly = PTR_FailedAssembly(TO_TADDR(assembly)); // Turn from bytes to wide characters if (!pAssembly->location.IsEmpty()) { if (!pAssembly->location.DacGetUnicode(count, location, pNeeded)) { hr = E_FAIL; } } else { if (pNeeded) *pNeeded = 1; if (location) location[0] = 0; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetFailedAssemblyDisplayName(CLRDATA_ADDRESS assembly, unsigned int count, _Inout_updates_z_(count) WCHAR *name, unsigned int *pNeeded) { if (assembly == NULL || (name == NULL && pNeeded == NULL) || (name != NULL && count == 0)) return E_INVALIDARG; SOSDacEnter(); FailedAssembly* pAssembly = PTR_FailedAssembly(TO_TADDR(assembly)); if (!pAssembly->displayName.IsEmpty()) { if (!pAssembly->displayName.DacGetUnicode(count, name, pNeeded)) { hr = E_FAIL; } } else { if (pNeeded) *pNeeded = 1; if (name) name[0] = 0; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetAssemblyList(CLRDATA_ADDRESS addr, int count, CLRDATA_ADDRESS values[], int *pNeeded) { if (addr == NULL) return E_INVALIDARG; SOSDacEnter(); BaseDomain* pBaseDomain = PTR_BaseDomain(TO_TADDR(addr)); int n=0; if (pBaseDomain->IsAppDomain()) { AppDomain::AssemblyIterator i = pBaseDomain->AsAppDomain()->IterateAssembliesEx( (AssemblyIterationFlags)(kIncludeLoading | kIncludeLoaded | kIncludeExecution)); CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly; if (values) { while (i.Next(pDomainAssembly.This()) && (n < count)) { if (pDomainAssembly->IsLoaded()) { CollectibleAssemblyHolder<Assembly *> pAssembly = pDomainAssembly->GetAssembly(); // Note: DAC doesn't need to keep the assembly alive - see code:CollectibleAssemblyHolder#CAH_DAC values[n++] = HOST_CDADDR(pAssembly.Extract()); } } } else { while (i.Next(pDomainAssembly.This())) if (pDomainAssembly->IsLoaded()) n++; } if (pNeeded) *pNeeded = n; } else { // The only other type of BaseDomain is the SystemDomain, and we shouldn't be asking // for the assemblies in it. 
_ASSERTE(false); hr = E_INVALIDARG; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetFailedAssemblyList(CLRDATA_ADDRESS appDomain, int count, CLRDATA_ADDRESS values[], unsigned int *pNeeded) { if ((appDomain == NULL) || (values == NULL && pNeeded == NULL)) { return E_INVALIDARG; } SOSDacEnter(); AppDomain* pAppDomain = PTR_AppDomain(TO_TADDR(appDomain)); int n=0; AppDomain::FailedAssemblyIterator i = pAppDomain->IterateFailedAssembliesEx(); while (i.Next() && n<=count) { // don't write past the end of the caller's values[] buffer if (values && (n < count)) values[n] = HOST_CDADDR(i.GetFailedAssembly()); n++; } if (pNeeded) *pNeeded = n; SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetAppDomainName(CLRDATA_ADDRESS addr, unsigned int count, _Inout_updates_z_(count) WCHAR *name, unsigned int *pNeeded) { SOSDacEnter(); PTR_BaseDomain pBaseDomain = PTR_BaseDomain(TO_TADDR(addr)); if (!pBaseDomain->IsAppDomain()) { // Shared domain and SystemDomain don't have this field. if (pNeeded) *pNeeded = 1; if (name) name[0] = 0; } else { AppDomain* pAppDomain = pBaseDomain->AsAppDomain(); if (!pAppDomain->m_friendlyName.IsEmpty()) { if (!pAppDomain->m_friendlyName.DacGetUnicode(count, name, pNeeded)) { hr = E_FAIL; } } else { if (pNeeded) *pNeeded = 1; if (name) name[0] = 0; hr = S_OK; } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetApplicationBase(CLRDATA_ADDRESS appDomain, int count, _Inout_updates_z_(count) WCHAR *base, unsigned int *pNeeded) { // Method is not supported on CoreCLR return E_FAIL; } HRESULT ClrDataAccess::GetPrivateBinPaths(CLRDATA_ADDRESS appDomain, int count, _Inout_updates_z_(count) WCHAR *paths, unsigned int *pNeeded) { // Method is not supported on CoreCLR return E_FAIL; } HRESULT ClrDataAccess::GetAppDomainConfigFile(CLRDATA_ADDRESS appDomain, int count, _Inout_updates_z_(count) WCHAR *configFile, unsigned int *pNeeded) { // Method is not supported on CoreCLR return E_FAIL; } HRESULT ClrDataAccess::GetAssemblyData(CLRDATA_ADDRESS cdBaseDomainPtr, CLRDATA_ADDRESS assembly, struct DacpAssemblyData *assemblyData) { if (assembly == NULL && cdBaseDomainPtr == NULL) { return E_INVALIDARG; } SOSDacEnter(); Assembly* pAssembly = PTR_Assembly(TO_TADDR(assembly)); // Make sure conditionally-assigned fields like AssemblySecDesc, LoadContext, etc.
are zeroed ZeroMemory(assemblyData, sizeof(DacpAssemblyData)); // If the specified BaseDomain is an AppDomain, get a pointer to it AppDomain * pDomain = NULL; if (cdBaseDomainPtr != NULL) { assemblyData->BaseDomainPtr = cdBaseDomainPtr; PTR_BaseDomain baseDomain = PTR_BaseDomain(TO_TADDR(cdBaseDomainPtr)); if( baseDomain->IsAppDomain() ) pDomain = baseDomain->AsAppDomain(); } assemblyData->AssemblyPtr = HOST_CDADDR(pAssembly); assemblyData->ClassLoader = HOST_CDADDR(pAssembly->GetLoader()); assemblyData->ParentDomain = HOST_CDADDR(pAssembly->GetDomain()); assemblyData->isDynamic = pAssembly->IsDynamic(); assemblyData->ModuleCount = 0; assemblyData->isDomainNeutral = FALSE; if (pAssembly->GetModule()) { assemblyData->ModuleCount++; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetAssemblyName(CLRDATA_ADDRESS assembly, unsigned int count, _Inout_updates_z_(count) WCHAR *name, unsigned int *pNeeded) { SOSDacEnter(); Assembly* pAssembly = PTR_Assembly(TO_TADDR(assembly)); if (name) name[0] = 0; if (!pAssembly->GetPEAssembly()->GetPath().IsEmpty()) { if (!pAssembly->GetPEAssembly()->GetPath().DacGetUnicode(count, name, pNeeded)) hr = E_FAIL; else if (name) name[count-1] = 0; } else if (!pAssembly->GetPEAssembly()->IsDynamic()) { StackSString displayName; pAssembly->GetPEAssembly()->GetDisplayName(displayName, 0); const WCHAR *val = displayName.GetUnicode(); if (pNeeded) *pNeeded = displayName.GetCount() + 1; if (name && count) { wcsncpy_s(name, count, val, _TRUNCATE); name[count-1] = 0; } } else { hr = E_FAIL; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetAssemblyLocation(CLRDATA_ADDRESS assembly, int count, _Inout_updates_z_(count) WCHAR *location, unsigned int *pNeeded) { if ((assembly == NULL) || (location == NULL && pNeeded == NULL) || (location != NULL && count == 0)) { return E_INVALIDARG; } SOSDacEnter(); Assembly* pAssembly = PTR_Assembly(TO_TADDR(assembly)); // Turn from bytes to wide characters if (!pAssembly->GetPEAssembly()->GetPath().IsEmpty()) { if (!pAssembly->GetPEAssembly()->GetPath(). DacGetUnicode(count, location, pNeeded)) { hr = E_FAIL; } } else { if (location) location[0] = 0; if (pNeeded) *pNeeded = 1; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetAssemblyModuleList(CLRDATA_ADDRESS assembly, unsigned int count, CLRDATA_ADDRESS modules[], unsigned int *pNeeded) { if (assembly == 0) return E_INVALIDARG; SOSDacEnter(); Assembly* pAssembly = PTR_Assembly(TO_TADDR(assembly)); if (modules) { if (pAssembly->GetModule() && count > 0) modules[0] = HOST_CDADDR(pAssembly->GetModule()); } if (pNeeded) *pNeeded = 1; SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetGCHeapDetails(CLRDATA_ADDRESS heap, struct DacpGcHeapDetails *details) { if (heap == 0 || details == NULL) return E_INVALIDARG; SOSDacEnter(); // doesn't make sense to call this on WKS mode if (!GCHeapUtilities::IsServerHeap()) hr = E_INVALIDARG; else #ifdef FEATURE_SVR_GC hr = ServerGCHeapDetails(heap, details); #else hr = E_NOTIMPL; #endif SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetGCHeapStaticData(struct DacpGcHeapDetails *detailsData) { // Make sure ClrDataAccess::ServerGCHeapDetails() is updated as well. 
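// This is the static (non-server) flavor: it reads the heap details directly from the DAC globals; ServerGCHeapDetails() handles the per-heap server GC case.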
if (detailsData == NULL) { return E_INVALIDARG; } SOSDacEnter(); detailsData->heapAddr = NULL; detailsData->lowest_address = PTR_CDADDR(g_lowest_address); detailsData->highest_address = PTR_CDADDR(g_highest_address); detailsData->current_c_gc_state = (CLRDATA_ADDRESS)*g_gcDacGlobals->current_c_gc_state; detailsData->alloc_allocated = (CLRDATA_ADDRESS)*g_gcDacGlobals->alloc_allocated; detailsData->ephemeral_heap_segment = (CLRDATA_ADDRESS)*g_gcDacGlobals->ephemeral_heap_segment; detailsData->card_table = PTR_CDADDR(g_card_table); detailsData->mark_array = (CLRDATA_ADDRESS)*g_gcDacGlobals->mark_array; detailsData->next_sweep_obj = (CLRDATA_ADDRESS)*g_gcDacGlobals->next_sweep_obj; if (g_gcDacGlobals->saved_sweep_ephemeral_seg != nullptr) { detailsData->saved_sweep_ephemeral_seg = (CLRDATA_ADDRESS)*g_gcDacGlobals->saved_sweep_ephemeral_seg; detailsData->saved_sweep_ephemeral_start = (CLRDATA_ADDRESS)*g_gcDacGlobals->saved_sweep_ephemeral_start; } else { // with regions, we don't have these variables anymore // use special value -1 in saved_sweep_ephemeral_seg to signal the region case detailsData->saved_sweep_ephemeral_seg = (CLRDATA_ADDRESS)-1; detailsData->saved_sweep_ephemeral_start = 0; } detailsData->background_saved_lowest_address = (CLRDATA_ADDRESS)*g_gcDacGlobals->background_saved_lowest_address; detailsData->background_saved_highest_address = (CLRDATA_ADDRESS)*g_gcDacGlobals->background_saved_highest_address; // get bounds for the different generations for (unsigned int i=0; i < DAC_NUMBERGENERATIONS; i++) { dac_generation generation = GenerationTableIndex(g_gcDacGlobals->generation_table, i); detailsData->generation_table[i].start_segment = (CLRDATA_ADDRESS) dac_cast<TADDR>(generation.start_segment); detailsData->generation_table[i].allocation_start = (CLRDATA_ADDRESS) generation.allocation_start; gc_alloc_context alloc_context = generation.allocation_context; detailsData->generation_table[i].allocContextPtr = (CLRDATA_ADDRESS)alloc_context.alloc_ptr; detailsData->generation_table[i].allocContextLimit = (CLRDATA_ADDRESS)alloc_context.alloc_limit; } if (g_gcDacGlobals->finalize_queue.IsValid()) { DPTR(dac_finalize_queue) fq = Dereference(g_gcDacGlobals->finalize_queue); DPTR(uint8_t*) fillPointersTable = dac_cast<TADDR>(fq) + offsetof(dac_finalize_queue, m_FillPointers); for (unsigned int i = 0; i < DAC_NUMBERGENERATIONS + 3; i++) { detailsData->finalization_fill_pointers[i] = (CLRDATA_ADDRESS)*TableIndex(fillPointersTable, i, sizeof(uint8_t*)); } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetHeapSegmentData(CLRDATA_ADDRESS seg, struct DacpHeapSegmentData *heapSegment) { if (seg == 0 || heapSegment == NULL) return E_INVALIDARG; SOSDacEnter(); if (GCHeapUtilities::IsServerHeap()) { #if !defined(FEATURE_SVR_GC) _ASSERTE(0); #else // !defined(FEATURE_SVR_GC) hr = GetServerHeapData(seg, heapSegment); #endif //!defined(FEATURE_SVR_GC) } else { dac_heap_segment *pSegment = __DPtr<dac_heap_segment>(TO_TADDR(seg)); if (!pSegment) { hr = E_INVALIDARG; } else { heapSegment->segmentAddr = seg; heapSegment->allocated = (CLRDATA_ADDRESS)(ULONG_PTR) pSegment->allocated; heapSegment->committed = (CLRDATA_ADDRESS)(ULONG_PTR) pSegment->committed; heapSegment->reserved = (CLRDATA_ADDRESS)(ULONG_PTR) pSegment->reserved; heapSegment->used = (CLRDATA_ADDRESS)(ULONG_PTR) pSegment->used; heapSegment->mem = (CLRDATA_ADDRESS)(ULONG_PTR) pSegment->mem; heapSegment->next = (CLRDATA_ADDRESS)dac_cast<TADDR>(pSegment->next); heapSegment->flags = pSegment->flags; heapSegment->gc_heap = NULL; 
heapSegment->background_allocated = (CLRDATA_ADDRESS)(ULONG_PTR)pSegment->background_allocated; } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetGCHeapList(unsigned int count, CLRDATA_ADDRESS heaps[], unsigned int *pNeeded) { SOSDacEnter(); // make sure we called this in appropriate circumstances (i.e., we have multiple heaps) if (GCHeapUtilities::IsServerHeap()) { #if !defined(FEATURE_SVR_GC) _ASSERTE(0); #else // !defined(FEATURE_SVR_GC) unsigned int heapCount = GCHeapCount(); if (pNeeded) *pNeeded = heapCount; if (heaps) { // get the heap locations if (count == heapCount) hr = GetServerHeaps(heaps, m_pTarget); else hr = E_INVALIDARG; } #endif // !defined(FEATURE_SVR_GC) } else { hr = E_FAIL; // doesn't make sense to call this on WKS mode } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetGCHeapData(struct DacpGcHeapData *gcheapData) { if (gcheapData == NULL) return E_INVALIDARG; SOSDacEnter(); // we need to check and see if g_heap_type // is GC_HEAP_INVALID, in which case we fail. ULONG32 gcHeapValue = g_heap_type; // GC_HEAP_TYPE has three possible values: // GC_HEAP_INVALID = 0, // GC_HEAP_WKS = 1, // GC_HEAP_SVR = 2 // If we get something other than that, we probably read the wrong location. _ASSERTE(gcHeapValue >= GC_HEAP_INVALID && gcHeapValue <= GC_HEAP_SVR); // we have GC_HEAP_INVALID if gcHeapValue == 0, so we're done - we haven't // initialized the heap yet. if (gcHeapValue == GC_HEAP_INVALID) { hr = E_FAIL; goto cleanup; } // Now we can get other important information about the heap // We can use GCHeapUtilities::IsServerHeap here because we have already validated // that the heap is in a valid state. We couldn't use it above, because IsServerHeap // asserts if the heap type is GC_HEAP_INVALID. gcheapData->g_max_generation = *g_gcDacGlobals->max_gen; gcheapData->bServerMode = GCHeapUtilities::IsServerHeap(); gcheapData->bGcStructuresValid = *g_gcDacGlobals->gc_structures_invalid_cnt == 0; if (GCHeapUtilities::IsServerHeap()) { #if !defined (FEATURE_SVR_GC) _ASSERTE(0); gcheapData->HeapCount = 1; #else // !defined (FEATURE_SVR_GC) gcheapData->HeapCount = GCHeapCount(); #endif // !defined (FEATURE_SVR_GC) } else { gcheapData->HeapCount = 1; } cleanup: ; SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetOOMStaticData(struct DacpOomData *oomData) { if (oomData == NULL) return E_INVALIDARG; SOSDacEnter(); memset(oomData, 0, sizeof(DacpOomData)); if (!GCHeapUtilities::IsServerHeap()) { oom_history* pOOMInfo = g_gcDacGlobals->oom_info; oomData->reason = pOOMInfo->reason; oomData->alloc_size = pOOMInfo->alloc_size; oomData->available_pagefile_mb = pOOMInfo->available_pagefile_mb; oomData->gc_index = pOOMInfo->gc_index; oomData->fgm = pOOMInfo->fgm; oomData->size = pOOMInfo->size; oomData->loh_p = pOOMInfo->loh_p; } else { hr = E_FAIL; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetOOMData(CLRDATA_ADDRESS oomAddr, struct DacpOomData *data) { if (oomAddr == 0 || data == NULL) return E_INVALIDARG; SOSDacEnter(); memset(data, 0, sizeof(DacpOomData)); if (!GCHeapUtilities::IsServerHeap()) hr = E_FAIL; // doesn't make sense to call this on WKS mode #ifdef FEATURE_SVR_GC else hr = ServerOomData(oomAddr, data); #else _ASSERTE_MSG(false, "IsServerHeap returned true but FEATURE_SVR_GC not defined"); hr = E_NOTIMPL; #endif //FEATURE_SVR_GC SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetGCGlobalMechanisms(size_t* globalMechanisms) { #ifdef GC_CONFIG_DRIVEN if (globalMechanisms == NULL) return E_INVALIDARG; SOSDacEnter(); memset(globalMechanisms, 0, 
(sizeof(size_t) * MAX_GLOBAL_GC_MECHANISMS_COUNT)); for (int i = 0; i < MAX_GLOBAL_GC_MECHANISMS_COUNT; i++) { globalMechanisms[i] = g_gcDacGlobals->gc_global_mechanisms[i]; } SOSDacLeave(); return hr; #else return E_NOTIMPL; #endif //GC_CONFIG_DRIVEN } HRESULT ClrDataAccess::GetGCInterestingInfoStaticData(struct DacpGCInterestingInfoData *data) { #ifdef GC_CONFIG_DRIVEN if (data == NULL) return E_INVALIDARG; static_assert_no_msg(DAC_NUMBERGENERATIONS == NUMBERGENERATIONS); static_assert_no_msg(DAC_NUM_GC_DATA_POINTS == NUM_GC_DATA_POINTS); static_assert_no_msg(DAC_MAX_COMPACT_REASONS_COUNT == MAX_COMPACT_REASONS_COUNT); static_assert_no_msg(DAC_MAX_EXPAND_MECHANISMS_COUNT == MAX_EXPAND_MECHANISMS_COUNT); static_assert_no_msg(DAC_MAX_GC_MECHANISM_BITS_COUNT == MAX_GC_MECHANISM_BITS_COUNT); SOSDacEnter(); memset(data, 0, sizeof(DacpGCInterestingInfoData)); if (g_heap_type != GC_HEAP_SVR) { for (int i = 0; i < NUM_GC_DATA_POINTS; i++) data->interestingDataPoints[i] = g_gcDacGlobals->interesting_data_per_heap[i]; for (int i = 0; i < MAX_COMPACT_REASONS_COUNT; i++) data->compactReasons[i] = g_gcDacGlobals->compact_reasons_per_heap[i]; for (int i = 0; i < MAX_EXPAND_MECHANISMS_COUNT; i++) data->expandMechanisms[i] = g_gcDacGlobals->expand_mechanisms_per_heap[i]; for (int i = 0; i < MAX_GC_MECHANISM_BITS_COUNT; i++) data->bitMechanisms[i] = g_gcDacGlobals->interesting_mechanism_bits_per_heap[i]; } else { hr = E_FAIL; } SOSDacLeave(); return hr; #else return E_NOTIMPL; #endif //GC_CONFIG_DRIVEN } HRESULT ClrDataAccess::GetGCInterestingInfoData(CLRDATA_ADDRESS interestingInfoAddr, struct DacpGCInterestingInfoData *data) { #ifdef GC_CONFIG_DRIVEN if (interestingInfoAddr == 0 || data == NULL) return E_INVALIDARG; SOSDacEnter(); memset(data, 0, sizeof(DacpGCInterestingInfoData)); if (!GCHeapUtilities::IsServerHeap()) hr = E_FAIL; // doesn't make sense to call this on WKS mode #ifdef FEATURE_SVR_GC else hr = ServerGCInterestingInfoData(interestingInfoAddr, data); #else _ASSERTE_MSG(false, "IsServerHeap returned true but FEATURE_SVR_GC not defined"); hr = E_NOTIMPL; #endif //FEATURE_SVR_GC SOSDacLeave(); return hr; #else return E_NOTIMPL; #endif //GC_CONFIG_DRIVEN } HRESULT ClrDataAccess::GetHeapAnalyzeData(CLRDATA_ADDRESS addr, struct DacpGcHeapAnalyzeData *data) { if (addr == 0 || data == NULL) return E_INVALIDARG; SOSDacEnter(); if (!GCHeapUtilities::IsServerHeap()) hr = E_FAIL; // doesn't make sense to call this on WKS mode #ifdef FEATURE_SVR_GC else hr = ServerGCHeapAnalyzeData(addr, data); #else _ASSERTE_MSG(false, "IsServerHeap returned true but FEATURE_SVR_GC not defined"); hr = E_NOTIMPL; #endif //FEATURE_SVR_GC SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetHeapAnalyzeStaticData(struct DacpGcHeapAnalyzeData *analyzeData) { if (analyzeData == NULL) return E_INVALIDARG; SOSDacEnter(); analyzeData->internal_root_array = dac_cast<TADDR>(g_gcDacGlobals->internal_root_array); analyzeData->internal_root_array_index = *g_gcDacGlobals->internal_root_array_index; analyzeData->heap_analyze_success = *g_gcDacGlobals->heap_analyze_success; SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetUsefulGlobals(struct DacpUsefulGlobalsData *globalsData) { if (globalsData == NULL) return E_INVALIDARG; SOSDacEnter(); TypeHandle objArray = g_pPredefinedArrayTypes[ELEMENT_TYPE_OBJECT]; if (objArray != NULL) globalsData->ArrayMethodTable = HOST_CDADDR(objArray.AsMethodTable()); else globalsData->ArrayMethodTable = 0; globalsData->StringMethodTable = HOST_CDADDR(g_pStringClass); globalsData->ObjectMethodTable 
= HOST_CDADDR(g_pObjectClass); globalsData->ExceptionMethodTable = HOST_CDADDR(g_pExceptionClass); globalsData->FreeMethodTable = HOST_CDADDR(g_pFreeObjectMethodTable); SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetNestedExceptionData(CLRDATA_ADDRESS exception, CLRDATA_ADDRESS *exceptionObject, CLRDATA_ADDRESS *nextNestedException) { if (exception == 0 || exceptionObject == NULL || nextNestedException == NULL) return E_INVALIDARG; SOSDacEnter(); #ifdef FEATURE_EH_FUNCLETS ExceptionTracker *pExData = PTR_ExceptionTracker(TO_TADDR(exception)); #else ExInfo *pExData = PTR_ExInfo(TO_TADDR(exception)); #endif // FEATURE_EH_FUNCLETS if (!pExData) { hr = E_INVALIDARG; } else { *exceptionObject = TO_CDADDR(*PTR_TADDR(pExData->m_hThrowable)); *nextNestedException = PTR_HOST_TO_TADDR(pExData->m_pPrevNestedInfo); } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetDomainLocalModuleData(CLRDATA_ADDRESS addr, struct DacpDomainLocalModuleData *pLocalModuleData) { if (addr == 0 || pLocalModuleData == NULL) return E_INVALIDARG; SOSDacEnter(); DomainLocalModule* pLocalModule = PTR_DomainLocalModule(TO_TADDR(addr)); pLocalModuleData->pGCStaticDataStart = TO_CDADDR(PTR_TO_TADDR(pLocalModule->GetPrecomputedGCStaticsBasePointer())); pLocalModuleData->pNonGCStaticDataStart = TO_CDADDR(pLocalModule->GetPrecomputedNonGCStaticsBasePointer()); pLocalModuleData->pDynamicClassTable = PTR_CDADDR(pLocalModule->m_pDynamicClassTable.Load()); pLocalModuleData->pClassData = (TADDR) (PTR_HOST_MEMBER_TADDR(DomainLocalModule, pLocalModule, m_pDataBlob)); SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetDomainLocalModuleDataFromModule(CLRDATA_ADDRESS addr, struct DacpDomainLocalModuleData *pLocalModuleData) { if (addr == 0 || pLocalModuleData == NULL) return E_INVALIDARG; SOSDacEnter(); Module* pModule = PTR_Module(TO_TADDR(addr)); DomainLocalModule* pLocalModule = PTR_DomainLocalModule(pModule->GetDomainLocalModule()); if (!pLocalModule) { hr = E_INVALIDARG; } else { pLocalModuleData->pGCStaticDataStart = TO_CDADDR(PTR_TO_TADDR(pLocalModule->GetPrecomputedGCStaticsBasePointer())); pLocalModuleData->pNonGCStaticDataStart = TO_CDADDR(pLocalModule->GetPrecomputedNonGCStaticsBasePointer()); pLocalModuleData->pDynamicClassTable = PTR_CDADDR(pLocalModule->m_pDynamicClassTable.Load()); pLocalModuleData->pClassData = (TADDR) (PTR_HOST_MEMBER_TADDR(DomainLocalModule, pLocalModule, m_pDataBlob)); } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetDomainLocalModuleDataFromAppDomain(CLRDATA_ADDRESS appDomainAddr, int moduleID, struct DacpDomainLocalModuleData *pLocalModuleData) { // CoreCLR does not support multi-appdomain shared assembly loading. Thus, a non-pointer sized moduleID cannot exist. 
return E_INVALIDARG; } HRESULT ClrDataAccess::GetThreadLocalModuleData(CLRDATA_ADDRESS thread, unsigned int index, struct DacpThreadLocalModuleData *pLocalModuleData) { if (pLocalModuleData == NULL) return E_INVALIDARG; SOSDacEnter(); pLocalModuleData->threadAddr = thread; pLocalModuleData->ModuleIndex = index; PTR_Thread pThread = PTR_Thread(TO_TADDR(thread)); PTR_ThreadLocalBlock pLocalBlock = ThreadStatics::GetCurrentTLB(pThread); PTR_ThreadLocalModule pLocalModule = pLocalBlock->GetTLMIfExists(ModuleIndex(index)); if (!pLocalModule) { hr = E_INVALIDARG; } else { pLocalModuleData->pGCStaticDataStart = TO_CDADDR(PTR_TO_TADDR(pLocalModule->GetPrecomputedGCStaticsBasePointer())); pLocalModuleData->pNonGCStaticDataStart = TO_CDADDR(pLocalModule->GetPrecomputedNonGCStaticsBasePointer()); pLocalModuleData->pDynamicClassTable = PTR_CDADDR(pLocalModule->m_pDynamicClassTable); pLocalModuleData->pClassData = (TADDR) (PTR_HOST_MEMBER_TADDR(ThreadLocalModule, pLocalModule, m_pDataBlob)); } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetHandleEnum(ISOSHandleEnum **ppHandleEnum) { unsigned int types[] = {HNDTYPE_WEAK_SHORT, HNDTYPE_WEAK_LONG, HNDTYPE_STRONG, HNDTYPE_PINNED, HNDTYPE_VARIABLE, HNDTYPE_DEPENDENT, HNDTYPE_ASYNCPINNED, HNDTYPE_SIZEDREF, #if defined(FEATURE_COMINTEROP) || defined(FEATURE_COMWRAPPERS) || defined(FEATURE_OBJCMARSHAL) HNDTYPE_REFCOUNTED, #endif // FEATURE_COMINTEROP || FEATURE_COMWRAPPERS || FEATURE_OBJCMARSHAL #if defined(FEATURE_COMINTEROP) || defined(FEATURE_COMWRAPPERS) HNDTYPE_WEAK_NATIVE_COM #endif // FEATURE_COMINTEROP }; return GetHandleEnumForTypes(types, ARRAY_SIZE(types), ppHandleEnum); } HRESULT ClrDataAccess::GetHandleEnumForTypes(unsigned int types[], unsigned int count, ISOSHandleEnum **ppHandleEnum) { if (ppHandleEnum == 0) return E_POINTER; SOSDacEnter(); DacHandleWalker *walker = new DacHandleWalker(); HRESULT hr = walker->Init(this, types, count); if (SUCCEEDED(hr)) hr = walker->QueryInterface(__uuidof(ISOSHandleEnum), (void**)ppHandleEnum); if (FAILED(hr)) delete walker; SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetHandleEnumForGC(unsigned int gen, ISOSHandleEnum **ppHandleEnum) { if (ppHandleEnum == 0) return E_POINTER; SOSDacEnter(); unsigned int types[] = {HNDTYPE_WEAK_SHORT, HNDTYPE_WEAK_LONG, HNDTYPE_STRONG, HNDTYPE_PINNED, HNDTYPE_VARIABLE, HNDTYPE_DEPENDENT, HNDTYPE_ASYNCPINNED, HNDTYPE_SIZEDREF, #if defined(FEATURE_COMINTEROP) || defined(FEATURE_COMWRAPPERS) || defined(FEATURE_OBJCMARSHAL) HNDTYPE_REFCOUNTED, #endif // FEATURE_COMINTEROP || FEATURE_COMWRAPPERS || FEATURE_OBJCMARSHAL #if defined(FEATURE_COMINTEROP) || defined(FEATURE_COMWRAPPERS) HNDTYPE_WEAK_NATIVE_COM #endif // FEATURE_COMINTEROP || FEATURE_COMWRAPPERS }; DacHandleWalker *walker = new DacHandleWalker(); HRESULT hr = walker->Init(this, types, ARRAY_SIZE(types), gen); if (SUCCEEDED(hr)) hr = walker->QueryInterface(__uuidof(ISOSHandleEnum), (void**)ppHandleEnum); if (FAILED(hr)) delete walker; SOSDacLeave(); return hr; } HRESULT ClrDataAccess::TraverseEHInfo(CLRDATA_ADDRESS ip, DUMPEHINFO pFunc, LPVOID token) { if (ip == 0 || pFunc == NULL) return E_INVALIDARG; SOSDacEnter(); EECodeInfo codeInfo(TO_TADDR(ip)); if (!codeInfo.IsValid()) { hr = E_INVALIDARG; } if (SUCCEEDED(hr)) { EH_CLAUSE_ENUMERATOR EnumState; EE_ILEXCEPTION_CLAUSE EHClause; unsigned EHCount; EHCount = codeInfo.GetJitManager()->InitializeEHEnumeration(codeInfo.GetMethodToken(), &EnumState); for (unsigned i = 0; i < EHCount; i++) { codeInfo.GetJitManager()->GetNextEHClause(&EnumState, &EHClause); 
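// Translate the runtime EH clause into the DAC-exposed DACEHInfo shape before invoking the callback.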
DACEHInfo deh; ZeroMemory(&deh,sizeof(deh)); if (IsFault(&EHClause)) { deh.clauseType = EHFault; } else if (IsFinally(&EHClause)) { deh.clauseType = EHFinally; } else if (IsFilterHandler(&EHClause)) { deh.clauseType = EHFilter; deh.filterOffset = EHClause.FilterOffset; } else if (IsTypedHandler(&EHClause)) { deh.clauseType = EHTyped; deh.isCatchAllHandler = (&EHClause.TypeHandle == (void*)(size_t)mdTypeRefNil); } else { deh.clauseType = EHUnknown; } if (HasCachedTypeHandle(&EHClause)) { deh.mtCatch = TO_CDADDR(&EHClause.TypeHandle); } else if(!IsFaultOrFinally(&EHClause)) { // the module of the token (whether a ref or def token) is the same as the module of the method containing the EH clause deh.moduleAddr = HOST_CDADDR(codeInfo.GetMethodDesc()->GetModule()); deh.tokCatch = EHClause.ClassToken; } deh.tryStartOffset = EHClause.TryStartPC; deh.tryEndOffset = EHClause.TryEndPC; deh.handlerStartOffset = EHClause.HandlerStartPC; deh.handlerEndOffset = EHClause.HandlerEndPC; deh.isDuplicateClause = IsDuplicateClause(&EHClause); if (!(pFunc)(i, EHCount, &deh, token)) { // User wants to stop the enumeration hr = E_ABORT; break; } } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::TraverseRCWCleanupList(CLRDATA_ADDRESS cleanupListPtr, VISITRCWFORCLEANUP pFunc, LPVOID token) { #ifdef FEATURE_COMINTEROP if (pFunc == 0) return E_INVALIDARG; SOSDacEnter(); RCWCleanupList *pList = g_pRCWCleanupList; if (cleanupListPtr) { pList = PTR_RCWCleanupList(TO_TADDR(cleanupListPtr)); } if (pList) { PTR_RCW pBucket = dac_cast<PTR_RCW>(TO_TADDR(pList->m_pFirstBucket)); while (pBucket != NULL) { PTR_RCW pRCW = pBucket; Thread *pSTAThread = pRCW->GetSTAThread(); LPVOID pCtxCookie = pRCW->GetWrapperCtxCookie(); BOOL bIsFreeThreaded = pRCW->IsFreeThreaded(); while (pRCW) { (pFunc)(HOST_CDADDR(pRCW),(CLRDATA_ADDRESS)pCtxCookie, (CLRDATA_ADDRESS)(TADDR)pSTAThread, bIsFreeThreaded, token); pRCW = pRCW->m_pNextRCW; } pBucket = pBucket->m_pNextCleanupBucket; } } SOSDacLeave(); return hr; #else return E_NOTIMPL; #endif // FEATURE_COMINTEROP } HRESULT ClrDataAccess::TraverseLoaderHeap(CLRDATA_ADDRESS loaderHeapAddr, VISITHEAP pFunc) { if (loaderHeapAddr == 0 || pFunc == 0) return E_INVALIDARG; SOSDacEnter(); LoaderHeap *pLoaderHeap = PTR_LoaderHeap(TO_TADDR(loaderHeapAddr)); PTR_LoaderHeapBlock block = pLoaderHeap->m_pFirstBlock; while (block.IsValid()) { TADDR addr = PTR_TO_TADDR(block->pVirtualAddress); size_t size = block->dwVirtualSize; BOOL bCurrentBlock = (block == pLoaderHeap->m_pFirstBlock); pFunc(addr,size,bCurrentBlock); block = block->pNext; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::TraverseVirtCallStubHeap(CLRDATA_ADDRESS pAppDomain, VCSHeapType heaptype, VISITHEAP pFunc) { if (pAppDomain == 0) return E_INVALIDARG; SOSDacEnter(); BaseDomain* pBaseDomain = PTR_BaseDomain(TO_TADDR(pAppDomain)); VirtualCallStubManager *pVcsMgr = PTR_VirtualCallStubManager((TADDR)pBaseDomain->GetLoaderAllocator()->GetVirtualCallStubManager()); if (!pVcsMgr) { hr = E_POINTER; } else { LoaderHeap *pLoaderHeap = NULL; switch(heaptype) { case IndcellHeap: pLoaderHeap = pVcsMgr->indcell_heap; break; case LookupHeap: pLoaderHeap = pVcsMgr->lookup_heap; break; case ResolveHeap: pLoaderHeap = pVcsMgr->resolve_heap; break; case DispatchHeap: pLoaderHeap = pVcsMgr->dispatch_heap; break; case CacheEntryHeap: pLoaderHeap = pVcsMgr->cache_entry_heap; break; default: hr = E_INVALIDARG; } if (SUCCEEDED(hr)) { PTR_LoaderHeapBlock block = pLoaderHeap->m_pFirstBlock; while (block.IsValid()) { TADDR addr = 
PTR_TO_TADDR(block->pVirtualAddress); size_t size = block->dwVirtualSize; BOOL bCurrentBlock = (block == pLoaderHeap->m_pFirstBlock); pFunc(addr, size, bCurrentBlock); block = block->pNext; } } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetSyncBlockData(unsigned int SBNumber, struct DacpSyncBlockData *pSyncBlockData) { if (pSyncBlockData == NULL) return E_INVALIDARG; SOSDacEnter(); ZeroMemory(pSyncBlockData,sizeof(DacpSyncBlockData)); pSyncBlockData->SyncBlockCount = (SyncBlockCache::s_pSyncBlockCache->m_FreeSyncTableIndex) - 1; PTR_SyncTableEntry ste = PTR_SyncTableEntry(dac_cast<TADDR>(g_pSyncTable)+(sizeof(SyncTableEntry) * SBNumber)); pSyncBlockData->bFree = ((dac_cast<TADDR>(ste->m_Object.Load())) & 1); if (pSyncBlockData->bFree == FALSE) { pSyncBlockData->Object = (CLRDATA_ADDRESS)dac_cast<TADDR>(ste->m_Object.Load()); if (ste->m_SyncBlock != NULL) { SyncBlock *pBlock = PTR_SyncBlock(ste->m_SyncBlock); pSyncBlockData->SyncBlockPointer = HOST_CDADDR(pBlock); #ifdef FEATURE_COMINTEROP if (pBlock->m_pInteropInfo) { pSyncBlockData->COMFlags |= (pBlock->m_pInteropInfo->DacGetRawRCW() != 0) ? SYNCBLOCKDATA_COMFLAGS_RCW : 0; pSyncBlockData->COMFlags |= (pBlock->m_pInteropInfo->GetCCW() != NULL) ? SYNCBLOCKDATA_COMFLAGS_CCW : 0; #ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION pSyncBlockData->COMFlags |= (pBlock->m_pInteropInfo->GetComClassFactory() != NULL) ? SYNCBLOCKDATA_COMFLAGS_CF : 0; #endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION } #endif // FEATURE_COMINTEROP pSyncBlockData->MonitorHeld = pBlock->m_Monitor.GetMonitorHeldStateVolatile(); pSyncBlockData->Recursion = pBlock->m_Monitor.GetRecursionLevel(); pSyncBlockData->HoldingThread = HOST_CDADDR(pBlock->m_Monitor.GetHoldingThread()); pSyncBlockData->appDomainPtr = PTR_HOST_TO_TADDR(AppDomain::GetCurrentDomain()); // TODO: Microsoft, implement the wait list pSyncBlockData->AdditionalThreadCount = 0; if (pBlock->m_Link.m_pNext != NULL) { PTR_SLink pLink = pBlock->m_Link.m_pNext; do { pSyncBlockData->AdditionalThreadCount++; pLink = pBlock->m_Link.m_pNext; } while ((pLink != NULL) && (pSyncBlockData->AdditionalThreadCount < 1000)); } } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetSyncBlockCleanupData(CLRDATA_ADDRESS syncBlock, struct DacpSyncBlockCleanupData *syncBlockCData) { if (syncBlock == 0 || syncBlockCData == NULL) return E_INVALIDARG; SOSDacEnter(); ZeroMemory (syncBlockCData, sizeof(DacpSyncBlockCleanupData)); SyncBlock *pBlock = NULL; if (syncBlock == NULL && SyncBlockCache::s_pSyncBlockCache->m_pCleanupBlockList) { pBlock = (SyncBlock *) PTR_SyncBlock( PTR_HOST_TO_TADDR(SyncBlockCache::s_pSyncBlockCache->m_pCleanupBlockList) - offsetof(SyncBlock, m_Link)); } else { pBlock = PTR_SyncBlock(TO_TADDR(syncBlock)); } if (pBlock) { syncBlockCData->SyncBlockPointer = HOST_CDADDR(pBlock); if (pBlock->m_Link.m_pNext) { syncBlockCData->nextSyncBlock = (CLRDATA_ADDRESS) (PTR_HOST_TO_TADDR(pBlock->m_Link.m_pNext) - offsetof(SyncBlock, m_Link)); } #ifdef FEATURE_COMINTEROP if (pBlock->m_pInteropInfo->DacGetRawRCW()) syncBlockCData->blockRCW = (CLRDATA_ADDRESS) pBlock->m_pInteropInfo->DacGetRawRCW(); #ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION if (pBlock->m_pInteropInfo->GetComClassFactory()) syncBlockCData->blockClassFactory = (CLRDATA_ADDRESS) (TADDR) pBlock->m_pInteropInfo->GetComClassFactory(); #endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION if (pBlock->m_pInteropInfo->GetCCW()) syncBlockCData->blockCCW = (CLRDATA_ADDRESS) dac_cast<TADDR>(pBlock->m_pInteropInfo->GetCCW()); #endif // FEATURE_COMINTEROP } 
SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetJitHelperFunctionName(CLRDATA_ADDRESS ip, unsigned int count, _Inout_updates_z_(count) char *name, unsigned int *pNeeded) { SOSDacEnter(); PCSTR pszHelperName = GetJitHelperName(TO_TADDR(ip)); if (pszHelperName == NULL) { hr = E_INVALIDARG; } else { unsigned int len = (unsigned int)strlen(pszHelperName) + 1; if (pNeeded) *pNeeded = len; if (name) { if (count < len) hr = E_FAIL; else strcpy_s(name, count, pszHelperName); } } SOSDacLeave(); return hr; }; HRESULT ClrDataAccess::GetJumpThunkTarget(T_CONTEXT *ctx, CLRDATA_ADDRESS *targetIP, CLRDATA_ADDRESS *targetMD) { if (ctx == NULL || targetIP == NULL || targetMD == NULL) return E_INVALIDARG; #ifdef TARGET_AMD64 SOSDacEnter(); if (!GetAnyThunkTarget(ctx, targetIP, targetMD)) hr = E_FAIL; SOSDacLeave(); return hr; #else return E_FAIL; #endif // TARGET_AMD64 } #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable:21000) // Suppress PREFast warning about overly large function #endif STDMETHODIMP ClrDataAccess::Request(IN ULONG32 reqCode, IN ULONG32 inBufferSize, IN BYTE* inBuffer, IN ULONG32 outBufferSize, OUT BYTE* outBuffer) { HRESULT status; DAC_ENTER(); EX_TRY { switch(reqCode) { case CLRDATA_REQUEST_REVISION: if (inBufferSize != 0 || inBuffer || outBufferSize != sizeof(ULONG32)) { status = E_INVALIDARG; } else { *(ULONG32*)outBuffer = 9; status = S_OK; } break; default: status = E_INVALIDARG; break; } } EX_CATCH { if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; } } EX_END_CATCH(SwallowAllExceptions) DAC_LEAVE(); return status; } #ifdef _PREFAST_ #pragma warning(pop) #endif void ClrDataAccess::EnumWksGlobalMemoryRegions(CLRDataEnumMemoryFlags flags) { SUPPORTS_DAC; #ifdef FEATURE_SVR_GC // If server GC, skip enumeration if (g_gcDacGlobals->g_heaps != nullptr) return; #endif Dereference(g_gcDacGlobals->ephemeral_heap_segment).EnumMem(); g_gcDacGlobals->alloc_allocated.EnumMem(); g_gcDacGlobals->gc_structures_invalid_cnt.EnumMem(); Dereference(g_gcDacGlobals->finalize_queue).EnumMem(); // Enumerate the entire generation table, which has variable size EnumGenerationTable(dac_cast<TADDR>(g_gcDacGlobals->generation_table)); if (g_gcDacGlobals->generation_table.IsValid()) { ULONG first = IsRegionGCEnabled() ? 
0 : (*g_gcDacGlobals->max_gen); // enumerating the first to max + 2 gives you // the segment list for all the normal segments plus the pinned heap segment (max + 2) // this is the convention in the GC so it is repeated here for (ULONG i = first; i <= *g_gcDacGlobals->max_gen + 2; i++) { dac_generation gen = GenerationTableIndex(g_gcDacGlobals->generation_table, i); __DPtr<dac_heap_segment> seg = dac_cast<TADDR>(gen.start_segment); while (seg) { DacEnumMemoryRegion(dac_cast<TADDR>(seg), sizeof(dac_heap_segment)); seg = seg->next; } } } } HRESULT ClrDataAccess::GetClrWatsonBuckets(CLRDATA_ADDRESS thread, void *pGenericModeBlock) { #ifdef TARGET_UNIX // This API is not available under TARGET_UNIX return E_FAIL; #else // TARGET_UNIX if (thread == 0 || pGenericModeBlock == NULL) return E_INVALIDARG; SOSDacEnter(); Thread * pThread = PTR_Thread(TO_TADDR(thread)); hr = GetClrWatsonBucketsWorker(pThread, reinterpret_cast<GenericModeBlock *>(pGenericModeBlock)); SOSDacLeave(); return hr; #endif // TARGET_UNIX } #ifndef TARGET_UNIX HRESULT ClrDataAccess::GetClrWatsonBucketsWorker(Thread * pThread, GenericModeBlock * pGM) { if ((pThread == NULL) || (pGM == NULL)) { return E_INVALIDARG; } // By default, there are no buckets PTR_VOID pBuckets = NULL; // Get the handle to the throwble OBJECTHANDLE ohThrowable = pThread->GetThrowableAsHandle(); if (ohThrowable != NULL) { // Get the object from handle and check if the throwable is preallocated or not OBJECTREF oThrowable = ObjectFromHandle(ohThrowable); if (oThrowable != NULL) { // Does the throwable have buckets? U1ARRAYREF refWatsonBucketArray = ((EXCEPTIONREF)oThrowable)->GetWatsonBucketReference(); if (refWatsonBucketArray != NULL) { // Get the watson buckets from the throwable for non-preallocated // exceptions pBuckets = dac_cast<PTR_VOID>(refWatsonBucketArray->GetDataPtr()); } else { // This is a preallocated exception object - check if the UE Watson bucket tracker // has any bucket details pBuckets = pThread->GetExceptionState()->GetUEWatsonBucketTracker()->RetrieveWatsonBuckets(); if (pBuckets == NULL) { // Since the UE watson bucket tracker does not have them, look up the current // exception tracker if (pThread->GetExceptionState()->GetCurrentExceptionTracker() != NULL) { pBuckets = pThread->GetExceptionState()->GetCurrentExceptionTracker()->GetWatsonBucketTracker()->RetrieveWatsonBuckets(); } } } } } else { // Debuger.Break doesn't have a throwable, but saves Watson buckets in EHWatsonBucketTracker. pBuckets = pThread->GetExceptionState()->GetUEWatsonBucketTracker()->RetrieveWatsonBuckets(); } // If pBuckets is non-null, it is the address of a Watson GenericModeBlock in the target process. 
if (pBuckets != NULL) { ULONG32 returned = 0; HRESULT hr = m_pTarget->ReadVirtual(dac_cast<TADDR>(pBuckets), reinterpret_cast<BYTE *>(pGM), sizeof(*pGM), &returned); if (FAILED(hr)) { hr = CORDBG_E_READVIRTUAL_FAILURE; } if (SUCCEEDED(hr) && (returned != sizeof(*pGM))) { hr = HRESULT_FROM_WIN32(ERROR_PARTIAL_COPY); } return hr; } else { // Buckets are not available return S_FALSE; } } #endif // TARGET_UNIX HRESULT ClrDataAccess::GetTLSIndex(ULONG *pIndex) { if (pIndex == NULL) return E_INVALIDARG; SOSDacEnter(); if (g_TlsIndex == TLS_OUT_OF_INDEXES) { *pIndex = 0; hr = S_FALSE; } else { *pIndex = g_TlsIndex; } SOSDacLeave(); return hr; } #ifndef TARGET_UNIX extern "C" IMAGE_DOS_HEADER __ImageBase; #endif HRESULT ClrDataAccess::GetDacModuleHandle(HMODULE *phModule) { if(phModule == NULL) return E_INVALIDARG; #ifndef TARGET_UNIX *phModule = (HMODULE)&__ImageBase; return S_OK; #else // hModule is not available under TARGET_UNIX return E_FAIL; #endif } HRESULT ClrDataAccess::GetRCWData(CLRDATA_ADDRESS addr, struct DacpRCWData *rcwData) { if (addr == 0 || rcwData == NULL) return E_INVALIDARG; #ifdef FEATURE_COMINTEROP SOSDacEnter(); ZeroMemory (rcwData, sizeof(DacpRCWData)); PTR_RCW pRCW = dac_cast<PTR_RCW>(CLRDATA_ADDRESS_TO_TADDR(addr)); rcwData->identityPointer = TO_CDADDR(pRCW->m_pIdentity); rcwData->unknownPointer = TO_CDADDR(pRCW->GetRawIUnknown_NoAddRef()); rcwData->vtablePtr = TO_CDADDR(pRCW->m_vtablePtr); rcwData->creatorThread = TO_CDADDR(pRCW->m_pCreatorThread); rcwData->ctxCookie = TO_CDADDR(pRCW->GetWrapperCtxCookie()); rcwData->refCount = pRCW->m_cbRefCount; rcwData->isAggregated = pRCW->IsURTAggregated(); rcwData->isContained = pRCW->IsURTContained(); rcwData->isFreeThreaded = pRCW->IsFreeThreaded(); rcwData->isDisconnected = pRCW->IsDisconnected(); if (pRCW->m_SyncBlockIndex != 0) { PTR_SyncTableEntry ste = PTR_SyncTableEntry(dac_cast<TADDR>(g_pSyncTable) + (sizeof(SyncTableEntry) * pRCW->m_SyncBlockIndex)); rcwData->managedObject = PTR_CDADDR(ste->m_Object.Load()); } // count the number of cached interface pointers rcwData->interfaceCount = 0; RCW::CachedInterfaceEntryIterator it = pRCW->IterateCachedInterfacePointers(); while (it.Next()) { if (it.GetEntry()->m_pUnknown.Load() != NULL) rcwData->interfaceCount++; } SOSDacLeave(); return hr; #else return E_NOTIMPL; #endif } HRESULT ClrDataAccess::GetRCWInterfaces(CLRDATA_ADDRESS rcw, unsigned int count, struct DacpCOMInterfacePointerData interfaces[], unsigned int *pNeeded) { if (rcw == 0) return E_INVALIDARG; #ifdef FEATURE_COMINTEROP SOSDacEnter(); PTR_RCW pRCW = dac_cast<PTR_RCW>(CLRDATA_ADDRESS_TO_TADDR(rcw)); if (interfaces == NULL) { if (pNeeded) { unsigned int c = 0; RCW::CachedInterfaceEntryIterator it = pRCW->IterateCachedInterfacePointers(); while (it.Next()) { if (it.GetEntry()->m_pUnknown.Load() != NULL) c++; } *pNeeded = c; } else { hr = E_INVALIDARG; } } else { ZeroMemory(interfaces, sizeof(DacpCOMInterfacePointerData) * count); unsigned int itemIndex = 0; RCW::CachedInterfaceEntryIterator it = pRCW->IterateCachedInterfacePointers(); while (it.Next()) { InterfaceEntry *pEntry = it.GetEntry(); if (pEntry->m_pUnknown.Load() != NULL) { if (itemIndex >= count) { // the outBuffer is too small hr = E_INVALIDARG; break; } else { interfaces[itemIndex].interfacePtr = TO_CDADDR(pEntry->m_pUnknown.Load()); interfaces[itemIndex].methodTable = TO_CDADDR(pEntry->m_pMT.Load()); interfaces[itemIndex].comContext = TO_CDADDR(it.GetCtxCookie()); itemIndex++; } } } if (SUCCEEDED(hr) && pNeeded) *pNeeded = itemIndex; } SOSDacLeave(); 
return hr; #else return E_NOTIMPL; #endif } #ifdef FEATURE_COMINTEROP PTR_ComCallWrapper ClrDataAccess::DACGetCCWFromAddress(CLRDATA_ADDRESS addr) { PTR_ComCallWrapper pCCW = NULL; // first check whether the address is our COM IP TADDR pPtr = CLRDATA_ADDRESS_TO_TADDR(addr); ULONG32 returned = 0; if (m_pTarget->ReadVirtual(pPtr, (PBYTE)&pPtr, sizeof(TADDR), &returned) == S_OK && returned == sizeof(TADDR)) { // this should be the vtable pointer - dereference the 2nd slot if (m_pTarget->ReadVirtual(pPtr + sizeof(PBYTE) * TEAR_OFF_SLOT, (PBYTE)&pPtr, sizeof(TADDR), &returned) == S_OK && returned == sizeof(TADDR)) { #ifdef TARGET_ARM // clear the THUMB bit on pPtr before comparing with known vtable entry pPtr &= ~THUMB_CODE; #endif if (pPtr == GetEEFuncEntryPoint(TEAR_OFF_STANDARD)) { // Points to ComCallWrapper PTR_IUnknown pUnk(CLRDATA_ADDRESS_TO_TADDR(addr)); pCCW = ComCallWrapper::GetWrapperFromIP(pUnk); } else if (pPtr == GetEEFuncEntryPoint(TEAR_OFF_SIMPLE) || pPtr == GetEEFuncEntryPoint(TEAR_OFF_SIMPLE_INNER)) { // Points to SimpleComCallWrapper PTR_IUnknown pUnk(CLRDATA_ADDRESS_TO_TADDR(addr)); pCCW = SimpleComCallWrapper::GetWrapperFromIP(pUnk)->GetMainWrapper(); } } } if (pCCW == NULL) { // no luck interpreting the address as a COM interface pointer - it must be a CCW address pCCW = dac_cast<PTR_ComCallWrapper>(CLRDATA_ADDRESS_TO_TADDR(addr)); } if (pCCW->IsLinked()) pCCW = ComCallWrapper::GetStartWrapper(pCCW); return pCCW; } PTR_IUnknown ClrDataAccess::DACGetCOMIPFromCCW(PTR_ComCallWrapper pCCW, int vtableIndex) { if (pCCW->m_rgpIPtr[vtableIndex] != NULL) { PTR_IUnknown pUnk = dac_cast<PTR_IUnknown>(dac_cast<TADDR>(pCCW) + offsetof(ComCallWrapper, m_rgpIPtr[vtableIndex])); PTR_ComMethodTable pCMT = ComMethodTable::ComMethodTableFromIP(pUnk); if (pCMT->IsLayoutComplete()) { // return only fully laid out vtables return pUnk; } } return NULL; } #endif #ifdef FEATURE_COMWRAPPERS BOOL ClrDataAccess::DACGetComWrappersCCWVTableQIAddress(CLRDATA_ADDRESS ccwPtr, TADDR *vTableAddress, TADDR *qiAddress) { _ASSERTE(vTableAddress != NULL && qiAddress != NULL); HRESULT hr = S_OK; ULONG32 bytesRead = 0; TADDR ccw = CLRDATA_ADDRESS_TO_TADDR(ccwPtr); *vTableAddress = NULL; if (FAILED(m_pTarget->ReadVirtual(ccw, (PBYTE)vTableAddress, sizeof(TADDR), &bytesRead)) || bytesRead != sizeof(TADDR) || vTableAddress == NULL) { return FALSE; } *qiAddress = NULL; if (FAILED(m_pTarget->ReadVirtual(*vTableAddress, (PBYTE)qiAddress, sizeof(TADDR), &bytesRead)) || bytesRead != sizeof(TADDR) || qiAddress == NULL) { return FALSE; } #ifdef TARGET_ARM // clear the THUMB bit on qiAddress before comparing with known vtable entry *qiAddress &= ~THUMB_CODE; #endif return TRUE; } BOOL ClrDataAccess::DACIsComWrappersCCW(CLRDATA_ADDRESS ccwPtr) { TADDR vTableAddress = NULL; TADDR qiAddress = NULL; if (!DACGetComWrappersCCWVTableQIAddress(ccwPtr, &vTableAddress, &qiAddress)) { return FALSE; } return (qiAddress == GetEEFuncEntryPoint(ManagedObjectWrapper_QueryInterface) || qiAddress == GetEEFuncEntryPoint(TrackerTarget_QueryInterface)); } TADDR ClrDataAccess::DACGetManagedObjectWrapperFromCCW(CLRDATA_ADDRESS ccwPtr) { if (!DACIsComWrappersCCW(ccwPtr)) { return NULL; } ULONG32 bytesRead = 0; TADDR managedObjectWrapperPtrPtr = ccwPtr & InteropLib::ABI::DispatchThisPtrMask; TADDR managedObjectWrapperPtr = 0; if (FAILED(m_pTarget->ReadVirtual(managedObjectWrapperPtrPtr, (PBYTE)&managedObjectWrapperPtr, sizeof(TADDR), &bytesRead)) || bytesRead != sizeof(TADDR)) { return NULL; } return managedObjectWrapperPtr; } HRESULT 
ClrDataAccess::DACTryGetComWrappersHandleFromCCW(CLRDATA_ADDRESS ccwPtr, OBJECTHANDLE* objHandle) { HRESULT hr = E_FAIL; TADDR ccw, managedObjectWrapperPtr; ULONG32 bytesRead = 0; OBJECTHANDLE handle; if (ccwPtr == 0 || objHandle == NULL) { hr = E_INVALIDARG; goto ErrExit; } if (!DACIsComWrappersCCW(ccwPtr)) { hr = E_FAIL; goto ErrExit; } ccw = CLRDATA_ADDRESS_TO_TADDR(ccwPtr); // Return ManagedObjectWrapper as an OBJECTHANDLE. (The OBJECTHANDLE is guaranteed to live at offset 0). managedObjectWrapperPtr = DACGetManagedObjectWrapperFromCCW(ccwPtr); if (managedObjectWrapperPtr == NULL) { hr = E_FAIL; goto ErrExit; } IfFailGo(m_pTarget->ReadVirtual(managedObjectWrapperPtr, (PBYTE)&handle, sizeof(OBJECTHANDLE), &bytesRead)); if (bytesRead != sizeof(OBJECTHANDLE)) { hr = E_FAIL; goto ErrExit; } *objHandle = handle; return S_OK; ErrExit: return hr; } HRESULT ClrDataAccess::DACTryGetComWrappersObjectFromCCW(CLRDATA_ADDRESS ccwPtr, OBJECTREF* objRef) { HRESULT hr = E_FAIL; if (ccwPtr == 0 || objRef == NULL) { hr = E_INVALIDARG; goto ErrExit; } OBJECTHANDLE handle; if (DACTryGetComWrappersHandleFromCCW(ccwPtr, &handle) != S_OK) { hr = E_FAIL; goto ErrExit; } *objRef = ObjectFromHandle(handle); return S_OK; ErrExit: return hr; } #endif HRESULT ClrDataAccess::GetCCWData(CLRDATA_ADDRESS ccw, struct DacpCCWData *ccwData) { if (ccw == 0 || ccwData == NULL) return E_INVALIDARG; #ifdef FEATURE_COMINTEROP SOSDacEnter(); ZeroMemory (ccwData, sizeof(DacpCCWData)); PTR_ComCallWrapper pCCW = DACGetCCWFromAddress(ccw); PTR_SimpleComCallWrapper pSimpleCCW = pCCW->GetSimpleWrapper(); ccwData->outerIUnknown = TO_CDADDR(pSimpleCCW->m_pOuter); ccwData->refCount = pSimpleCCW->GetRefCount(); ccwData->isNeutered = pSimpleCCW->IsNeutered(); ccwData->ccwAddress = TO_CDADDR(dac_cast<TADDR>(pCCW)); ccwData->hasStrongRef = pCCW->IsWrapperActive(); ccwData->handle = pCCW->GetObjectHandle(); ccwData->isExtendsCOMObject = pCCW->GetSimpleWrapper()->IsExtendsCOMObject(); ccwData->isAggregated = pCCW->GetSimpleWrapper()->IsAggregated(); if (pCCW->GetObjectHandle() != NULL) ccwData->managedObject = PTR_CDADDR(ObjectFromHandle(pCCW->GetObjectHandle())); // count the number of COM vtables ccwData->interfaceCount = 0; while (pCCW != NULL) { for (int i = 0; i < ComCallWrapper::NumVtablePtrs; i++) { if (DACGetCOMIPFromCCW(pCCW, i) != NULL) ccwData->interfaceCount++; } pCCW = ComCallWrapper::GetNext(pCCW); } SOSDacLeave(); return hr; #else return E_NOTIMPL; #endif } HRESULT ClrDataAccess::GetCCWInterfaces(CLRDATA_ADDRESS ccw, unsigned int count, struct DacpCOMInterfacePointerData interfaces[], unsigned int *pNeeded) { if (ccw == 0) return E_INVALIDARG; #ifdef FEATURE_COMINTEROP SOSDacEnter(); PTR_ComCallWrapper pCCW = DACGetCCWFromAddress(ccw); if (interfaces == NULL) { if (pNeeded) { unsigned int c = 0; while (pCCW != NULL) { for (int i = 0; i < ComCallWrapper::NumVtablePtrs; i++) if (DACGetCOMIPFromCCW(pCCW, i) != NULL) c++; pCCW = ComCallWrapper::GetNext(pCCW); } *pNeeded = c; } else { hr = E_INVALIDARG; } } else { ZeroMemory(interfaces, sizeof(DacpCOMInterfacePointerData) * count); PTR_ComCallWrapperTemplate pCCWTemplate = pCCW->GetSimpleWrapper()->GetComCallWrapperTemplate(); unsigned int itemIndex = 0; unsigned int wrapperOffset = 0; while (pCCW != NULL && SUCCEEDED(hr)) { for (int i = 0; i < ComCallWrapper::NumVtablePtrs && SUCCEEDED(hr); i++) { PTR_IUnknown pUnk = DACGetCOMIPFromCCW(pCCW, i); if (pUnk != NULL) { if (itemIndex >= count) { // the outBuffer is too small hr = E_INVALIDARG; break; } 
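// Record this interface pointer; slot 0 of the first ComCallWrapper is the basic IUnknown/IDispatch vtable and is reported below with a null MethodTable, while later slots map back to the class or interface ComMethodTable.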
interfaces[itemIndex].interfacePtr = PTR_CDADDR(pUnk); // if this is the first ComCallWrapper, the 0th vtable slots is special if (wrapperOffset == 0 && i == ComCallWrapper::Slot_Basic) { // this is IDispatch/IUnknown interfaces[itemIndex].methodTable = NULL; } else { // this slot represents the class interface or an interface implemented by the class DWORD ifaceMapIndex = wrapperOffset + i - ComCallWrapper::Slot_FirstInterface; PTR_ComMethodTable pCMT = ComMethodTable::ComMethodTableFromIP(pUnk); interfaces[itemIndex].methodTable = PTR_CDADDR(pCMT->GetMethodTable()); } itemIndex++; } } pCCW = ComCallWrapper::GetNext(pCCW); wrapperOffset += ComCallWrapper::NumVtablePtrs; } if (SUCCEEDED(hr) && pNeeded) *pNeeded = itemIndex; } SOSDacLeave(); return hr; #else return E_NOTIMPL; #endif } HRESULT ClrDataAccess::GetObjectExceptionData(CLRDATA_ADDRESS objAddr, struct DacpExceptionObjectData *data) { if (data == NULL) return E_POINTER; SOSDacEnter(); PTR_ExceptionObject pObj = dac_cast<PTR_ExceptionObject>(TO_TADDR(objAddr)); data->Message = TO_CDADDR(dac_cast<TADDR>(pObj->GetMessage())); data->InnerException = TO_CDADDR(dac_cast<TADDR>(pObj->GetInnerException())); data->StackTrace = TO_CDADDR(dac_cast<TADDR>(pObj->GetStackTraceArrayObject())); data->WatsonBuckets = TO_CDADDR(dac_cast<TADDR>(pObj->GetWatsonBucketReference())); data->StackTraceString = TO_CDADDR(dac_cast<TADDR>(pObj->GetStackTraceString())); data->RemoteStackTraceString = TO_CDADDR(dac_cast<TADDR>(pObj->GetRemoteStackTraceString())); data->HResult = pObj->GetHResult(); data->XCode = pObj->GetXCode(); SOSDacLeave(); return hr; } HRESULT ClrDataAccess::IsRCWDCOMProxy(CLRDATA_ADDRESS rcwAddr, BOOL* isDCOMProxy) { if (isDCOMProxy == nullptr) { return E_POINTER; } *isDCOMProxy = FALSE; #ifdef FEATURE_COMINTEROP return S_OK; #else return E_NOTIMPL; #endif // FEATURE_COMINTEROP } HRESULT ClrDataAccess::GetClrNotification(CLRDATA_ADDRESS arguments[], int count, int *pNeeded) { SOSDacEnter(); *pNeeded = MAX_CLR_NOTIFICATION_ARGS; if (g_clrNotificationArguments[0] == NULL) { hr = E_FAIL; } else { for (int i = 0; i < count && i < MAX_CLR_NOTIFICATION_ARGS; i++) { arguments[i] = g_clrNotificationArguments[i]; } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetPendingReJITID(CLRDATA_ADDRESS methodDesc, int *pRejitId) { if (methodDesc == 0 || pRejitId == NULL) { return E_INVALIDARG; } SOSDacEnter(); *pRejitId = -1; PTR_MethodDesc pMD = PTR_MethodDesc(TO_TADDR(methodDesc)); CodeVersionManager* pCodeVersionManager = pMD->GetCodeVersionManager(); CodeVersionManager::LockHolder codeVersioningLockHolder; ILCodeVersion ilVersion = pCodeVersionManager->GetActiveILCodeVersion(pMD); if (ilVersion.IsNull()) { hr = E_INVALIDARG; } else if (ilVersion.GetRejitState() == ILCodeVersion::kStateRequested) { *pRejitId = (int)ilVersion.GetVersionId(); } else { hr = S_FALSE; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetReJITInformation(CLRDATA_ADDRESS methodDesc, int rejitId, struct DacpReJitData2 *pReJitData) { if (methodDesc == 0 || rejitId < 0 || pReJitData == NULL) { return E_INVALIDARG; } SOSDacEnter(); PTR_MethodDesc pMD = PTR_MethodDesc(TO_TADDR(methodDesc)); CodeVersionManager* pCodeVersionManager = pMD->GetCodeVersionManager(); CodeVersionManager::LockHolder codeVersioningLockHolder; ILCodeVersion ilVersion = pCodeVersionManager->GetILCodeVersion(pMD, rejitId); if (ilVersion.IsNull()) { hr = E_INVALIDARG; } else { pReJitData->rejitID = rejitId; switch (ilVersion.GetRejitState()) { default: _ASSERTE(!"Unknown SharedRejitInfo state. 
DAC should be updated to understand this new state."); pReJitData->flags = DacpReJitData2::kUnknown; break; case ILCodeVersion::kStateRequested: pReJitData->flags = DacpReJitData2::kRequested; break; case ILCodeVersion::kStateActive: pReJitData->flags = DacpReJitData2::kActive; break; } pReJitData->il = TO_CDADDR(PTR_TO_TADDR(ilVersion.GetIL())); PTR_ILCodeVersionNode nodePtr = ilVersion.IsDefaultVersion() ? NULL : ilVersion.AsNode(); pReJitData->ilCodeVersionNodePtr = TO_CDADDR(PTR_TO_TADDR(nodePtr)); } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetProfilerModifiedILInformation(CLRDATA_ADDRESS methodDesc, struct DacpProfilerILData *pILData) { if (methodDesc == 0 || pILData == NULL) { return E_INVALIDARG; } SOSDacEnter(); pILData->type = DacpProfilerILData::Unmodified; pILData->rejitID = 0; pILData->il = NULL; PTR_MethodDesc pMD = PTR_MethodDesc(TO_TADDR(methodDesc)); CodeVersionManager* pCodeVersionManager = pMD->GetCodeVersionManager(); CodeVersionManager::LockHolder codeVersioningLockHolder; ILCodeVersion ilVersion = pCodeVersionManager->GetActiveILCodeVersion(pMD); if (ilVersion.GetRejitState() != ILCodeVersion::kStateActive || !ilVersion.HasDefaultIL()) { pILData->type = DacpProfilerILData::ReJITModified; pILData->rejitID = static_cast<ULONG>(pCodeVersionManager->GetActiveILCodeVersion(pMD).GetVersionId()); } TADDR pDynamicIL = pMD->GetModule()->GetDynamicIL(pMD->GetMemberDef(), TRUE); if (pDynamicIL != NULL) { pILData->type = DacpProfilerILData::ILModified; pILData->il = (CLRDATA_ADDRESS)pDynamicIL; } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetMethodsWithProfilerModifiedIL(CLRDATA_ADDRESS mod, CLRDATA_ADDRESS *methodDescs, int cMethodDescs, int *pcMethodDescs) { if (mod == 0 || methodDescs == NULL || cMethodDescs == 0 || pcMethodDescs == NULL) { return E_INVALIDARG; } SOSDacEnter(); *pcMethodDescs = 0; PTR_Module pModule = PTR_Module(TO_TADDR(mod)); CodeVersionManager* pCodeVersionManager = pModule->GetCodeVersionManager(); CodeVersionManager::LockHolder codeVersioningLockHolder; LookupMap<PTR_MethodTable>::Iterator typeIter(&pModule->m_TypeDefToMethodTableMap); for (int i = 0; typeIter.Next(); i++) { if (*pcMethodDescs >= cMethodDescs) { break; } if (typeIter.GetElement()) { MethodTable* pMT = typeIter.GetElement(); for (MethodTable::IntroducedMethodIterator itMethods(pMT, FALSE); itMethods.IsValid(); itMethods.Next()) { PTR_MethodDesc pMD = dac_cast<PTR_MethodDesc>(itMethods.GetMethodDesc()); TADDR pDynamicIL = pModule->GetDynamicIL(pMD->GetMemberDef(), TRUE); ILCodeVersion ilVersion = pCodeVersionManager->GetActiveILCodeVersion(pMD); if (ilVersion.GetRejitState() != ILCodeVersion::kStateActive || !ilVersion.HasDefaultIL() || pDynamicIL != NULL) { methodDescs[*pcMethodDescs] = PTR_CDADDR(pMD); ++(*pcMethodDescs); } if (*pcMethodDescs >= cMethodDescs) { break; } } } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetNumberGenerations(unsigned int *pGenerations) { if (pGenerations == NULL) { return E_INVALIDARG; } SOSDacEnter(); *pGenerations = (unsigned int)(g_gcDacGlobals->total_generation_count); SOSDacLeave(); return S_OK; } HRESULT ClrDataAccess::GetGenerationTable(unsigned int cGenerations, struct DacpGenerationData *pGenerationData, unsigned int *pNeeded) { if (cGenerations > 0 && pGenerationData == NULL) { return E_INVALIDARG; } SOSDacEnter(); HRESULT hr = S_OK; unsigned int numGenerationTableEntries = (unsigned int)(g_gcDacGlobals->total_generation_count); if (pNeeded != NULL) { *pNeeded = numGenerationTableEntries; } if (cGenerations < 
numGenerationTableEntries) { hr = S_FALSE; } else { if (g_gcDacGlobals->generation_table.IsValid()) { for (unsigned int i = 0; i < numGenerationTableEntries; i++) { dac_generation generation = GenerationTableIndex(g_gcDacGlobals->generation_table, i); pGenerationData[i].start_segment = (CLRDATA_ADDRESS) dac_cast<TADDR>(generation.start_segment); pGenerationData[i].allocation_start = (CLRDATA_ADDRESS) generation.allocation_start; gc_alloc_context alloc_context = generation.allocation_context; pGenerationData[i].allocContextPtr = (CLRDATA_ADDRESS)alloc_context.alloc_ptr; pGenerationData[i].allocContextLimit = (CLRDATA_ADDRESS)alloc_context.alloc_limit; } } else { hr = E_FAIL; } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetFinalizationFillPointers(unsigned int cFillPointers, CLRDATA_ADDRESS *pFinalizationFillPointers, unsigned int *pNeeded) { if (cFillPointers > 0 && pFinalizationFillPointers == NULL) { return E_INVALIDARG; } SOSDacEnter(); HRESULT hr = S_OK; unsigned int numFillPointers = (unsigned int)(g_gcDacGlobals->total_generation_count + dac_finalize_queue::ExtraSegCount); if (pNeeded != NULL) { *pNeeded = numFillPointers; } if (cFillPointers < numFillPointers) { hr = S_FALSE; } else { if (g_gcDacGlobals->finalize_queue.IsValid()) { DPTR(dac_finalize_queue) fq = Dereference(g_gcDacGlobals->finalize_queue); DPTR(uint8_t*) fillPointersTable = dac_cast<TADDR>(fq) + offsetof(dac_finalize_queue, m_FillPointers); for (unsigned int i = 0; i < numFillPointers; i++) { pFinalizationFillPointers[i] = (CLRDATA_ADDRESS)*TableIndex(fillPointersTable, i, sizeof(uint8_t*)); } } else { hr = E_FAIL; } } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetGenerationTableSvr(CLRDATA_ADDRESS heapAddr, unsigned int cGenerations, struct DacpGenerationData *pGenerationData, unsigned int *pNeeded) { if (heapAddr == NULL || (cGenerations > 0 && pGenerationData == NULL)) { return E_INVALIDARG; } SOSDacEnter(); HRESULT hr = S_OK; #ifdef FEATURE_SVR_GC unsigned int numGenerationTableEntries = (unsigned int)(g_gcDacGlobals->total_generation_count); if (pNeeded != NULL) { *pNeeded = numGenerationTableEntries; } if (cGenerations < numGenerationTableEntries) { hr = S_FALSE; } else { TADDR heapAddress = TO_TADDR(heapAddr); if (heapAddress != 0) { for (unsigned int i = 0; i < numGenerationTableEntries; ++i) { dac_generation generation = ServerGenerationTableIndex(heapAddress, i); pGenerationData[i].start_segment = (CLRDATA_ADDRESS)dac_cast<TADDR>(generation.start_segment); pGenerationData[i].allocation_start = (CLRDATA_ADDRESS)(ULONG_PTR)generation.allocation_start; gc_alloc_context alloc_context = generation.allocation_context; pGenerationData[i].allocContextPtr = (CLRDATA_ADDRESS)(ULONG_PTR)alloc_context.alloc_ptr; pGenerationData[i].allocContextLimit = (CLRDATA_ADDRESS)(ULONG_PTR)alloc_context.alloc_limit; } } else { hr = E_FAIL; } } #else hr = E_NOTIMPL; #endif SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetFinalizationFillPointersSvr(CLRDATA_ADDRESS heapAddr, unsigned int cFillPointers, CLRDATA_ADDRESS *pFinalizationFillPointers, unsigned int *pNeeded) { if (heapAddr == NULL || (cFillPointers > 0 && pFinalizationFillPointers == NULL)) { return E_INVALIDARG; } SOSDacEnter(); HRESULT hr = S_OK; #ifdef FEATURE_SVR_GC unsigned int numFillPointers = (unsigned int)(g_gcDacGlobals->total_generation_count + dac_finalize_queue::ExtraSegCount); if (pNeeded != NULL) { *pNeeded = numFillPointers; } if (cFillPointers < numFillPointers) { hr = S_FALSE; } else { TADDR heapAddress = TO_TADDR(heapAddr); if 
(heapAddress != 0) { dac_gc_heap heap = LoadGcHeapData(heapAddress); dac_gc_heap* pHeap = &heap; DPTR(dac_finalize_queue) fq = pHeap->finalize_queue; DPTR(uint8_t*) pFillPointerArray= dac_cast<TADDR>(fq) + offsetof(dac_finalize_queue, m_FillPointers); for (unsigned int i = 0; i < numFillPointers; ++i) { pFinalizationFillPointers[i] = (CLRDATA_ADDRESS) pFillPointerArray[i]; } } else { hr = E_FAIL; } } #else hr = E_NOTIMPL; #endif SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetAssemblyLoadContext(CLRDATA_ADDRESS methodTable, CLRDATA_ADDRESS* assemblyLoadContext) { if (methodTable == 0 || assemblyLoadContext == NULL) return E_INVALIDARG; SOSDacEnter(); PTR_MethodTable pMT = PTR_MethodTable(CLRDATA_ADDRESS_TO_TADDR(methodTable)); PTR_Module pModule = pMT->GetModule(); PTR_PEAssembly pPEAssembly = pModule->GetPEAssembly(); PTR_AssemblyBinder pBinder = pPEAssembly->GetAssemblyBinder(); INT_PTR managedAssemblyLoadContextHandle = pBinder->GetManagedAssemblyLoadContext(); TADDR managedAssemblyLoadContextAddr = 0; if (managedAssemblyLoadContextHandle != 0) { DacReadAll(managedAssemblyLoadContextHandle,&managedAssemblyLoadContextAddr,sizeof(TADDR),true); } *assemblyLoadContext = TO_CDADDR(managedAssemblyLoadContextAddr); SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetBreakingChangeVersion(int* pVersion) { if (pVersion == nullptr) return E_INVALIDARG; *pVersion = SOS_BREAKING_CHANGE_VERSION; return S_OK; } HRESULT ClrDataAccess::GetObjectComWrappersData(CLRDATA_ADDRESS objAddr, CLRDATA_ADDRESS *rcw, unsigned int count, CLRDATA_ADDRESS *mowList, unsigned int *pNeeded) { #ifdef FEATURE_COMWRAPPERS if (objAddr == 0 ) { return E_INVALIDARG; } if (count > 0 && mowList == NULL) { return E_INVALIDARG; } SOSDacEnter(); if (pNeeded != NULL) { *pNeeded = 0; } if (rcw != NULL) { *rcw = 0; } PTR_SyncBlock pSyncBlk = PTR_Object(TO_TADDR(objAddr))->PassiveGetSyncBlock(); if (pSyncBlk != NULL) { PTR_InteropSyncBlockInfo pInfo = pSyncBlk->GetInteropInfoNoCreate(); if (pInfo != NULL) { if (rcw != NULL) { *rcw = TO_TADDR(pInfo->m_externalComObjectContext); } DPTR(NewHolder<ManagedObjectComWrapperByIdMap>) mapHolder(PTR_TO_MEMBER_TADDR(InteropSyncBlockInfo, pInfo, m_managedObjectComWrapperMap)); DPTR(ManagedObjectComWrapperByIdMap *)ppMap(PTR_TO_MEMBER_TADDR(NewHolder<ManagedObjectComWrapperByIdMap>, mapHolder, m_value)); DPTR(ManagedObjectComWrapperByIdMap) pMap(TO_TADDR(*ppMap)); CQuickArrayList<CLRDATA_ADDRESS> comWrappers; if (pMap != NULL) { ManagedObjectComWrapperByIdMap::Iterator iter = pMap->Begin(); while (iter != pMap->End()) { comWrappers.Push(TO_CDADDR(iter->Value())); ++iter; } } if (pNeeded != NULL) { *pNeeded = (unsigned int)comWrappers.Size(); } for (SIZE_T pos = 0; pos < comWrappers.Size(); ++pos) { if (pos >= count) { hr = S_FALSE; break; } mowList[pos] = comWrappers[pos]; } } else { hr = S_FALSE; } } else { hr = S_FALSE; } SOSDacLeave(); return hr; #else // FEATURE_COMWRAPPERS return E_NOTIMPL; #endif // FEATURE_COMWRAPPERS } HRESULT ClrDataAccess::IsComWrappersCCW(CLRDATA_ADDRESS ccw, BOOL *isComWrappersCCW) { #ifdef FEATURE_COMWRAPPERS if (ccw == 0) { return E_INVALIDARG; } SOSDacEnter(); if (isComWrappersCCW != NULL) { TADDR managedObjectWrapperPtr = DACGetManagedObjectWrapperFromCCW(ccw); *isComWrappersCCW = managedObjectWrapperPtr != NULL; hr = *isComWrappersCCW ? 
S_OK : S_FALSE; } SOSDacLeave(); return hr; #else // FEATURE_COMWRAPPERS return E_NOTIMPL; #endif // FEATURE_COMWRAPPERS } HRESULT ClrDataAccess::GetComWrappersCCWData(CLRDATA_ADDRESS ccw, CLRDATA_ADDRESS *managedObject, int *refCount) { #ifdef FEATURE_COMWRAPPERS if (ccw == 0) { return E_INVALIDARG; } SOSDacEnter(); TADDR managedObjectWrapperPtr = DACGetManagedObjectWrapperFromCCW(ccw); if (managedObjectWrapperPtr != NULL) { PTR_ManagedObjectWrapper pMOW(managedObjectWrapperPtr); if (managedObject != NULL) { OBJECTREF managedObjectRef; if (SUCCEEDED(DACTryGetComWrappersObjectFromCCW(ccw, &managedObjectRef))) { *managedObject = PTR_HOST_TO_TADDR(managedObjectRef); } else { *managedObject = 0; } } if (refCount != NULL) { *refCount = (int)pMOW->RefCount; } } else { // Not a ComWrappers CCW hr = E_INVALIDARG; } SOSDacLeave(); return hr; #else // FEATURE_COMWRAPPERS return E_NOTIMPL; #endif // FEATURE_COMWRAPPERS } HRESULT ClrDataAccess::IsComWrappersRCW(CLRDATA_ADDRESS rcw, BOOL *isComWrappersRCW) { #ifdef FEATURE_COMWRAPPERS if (rcw == 0) { return E_INVALIDARG; } SOSDacEnter(); if (isComWrappersRCW != NULL) { PTR_ExternalObjectContext pRCW(TO_TADDR(rcw)); BOOL stillValid = TRUE; if(pRCW->SyncBlockIndex >= SyncBlockCache::s_pSyncBlockCache->m_SyncTableSize) { stillValid = FALSE; } PTR_SyncBlock pSyncBlk = NULL; if (stillValid) { PTR_SyncTableEntry ste = PTR_SyncTableEntry(dac_cast<TADDR>(g_pSyncTable) + (sizeof(SyncTableEntry) * pRCW->SyncBlockIndex)); pSyncBlk = ste->m_SyncBlock; if(pSyncBlk == NULL) { stillValid = FALSE; } } PTR_InteropSyncBlockInfo pInfo = NULL; if (stillValid) { pInfo = pSyncBlk->GetInteropInfoNoCreate(); if(pInfo == NULL) { stillValid = FALSE; } } if (stillValid) { stillValid = TO_TADDR(pInfo->m_externalComObjectContext) == PTR_HOST_TO_TADDR(pRCW); } *isComWrappersRCW = stillValid; hr = *isComWrappersRCW ? 
S_OK : S_FALSE; } SOSDacLeave(); return hr; #else // FEATURE_COMWRAPPERS return E_NOTIMPL; #endif // FEATURE_COMWRAPPERS } HRESULT ClrDataAccess::GetComWrappersRCWData(CLRDATA_ADDRESS rcw, CLRDATA_ADDRESS *identity) { #ifdef FEATURE_COMWRAPPERS if (rcw == 0) { return E_INVALIDARG; } SOSDacEnter(); PTR_ExternalObjectContext pEOC(TO_TADDR(rcw)); if (identity != NULL) { *identity = PTR_CDADDR(pEOC->Identity); } SOSDacLeave(); return hr; #else // FEATURE_COMWRAPPERS return E_NOTIMPL; #endif // FEATURE_COMWRAPPERS } namespace { BOOL TryReadTaggedMemoryState( CLRDATA_ADDRESS objAddr, ICorDebugDataTarget* target, CLRDATA_ADDRESS *taggedMemory = NULL, size_t *taggedMemorySizeInBytes = NULL) { BOOL hasTaggedMemory = FALSE; #ifdef FEATURE_OBJCMARSHAL EX_TRY_ALLOW_DATATARGET_MISSING_MEMORY { PTR_SyncBlock pSyncBlk = DACGetSyncBlockFromObjectPointer(CLRDATA_ADDRESS_TO_TADDR(objAddr), target); if (pSyncBlk != NULL) { PTR_InteropSyncBlockInfo pInfo = pSyncBlk->GetInteropInfoNoCreate(); if (pInfo != NULL) { CLRDATA_ADDRESS taggedMemoryLocal = PTR_CDADDR(pInfo->GetTaggedMemory()); if (taggedMemoryLocal != NULL) { hasTaggedMemory = TRUE; if (taggedMemory) *taggedMemory = taggedMemoryLocal; if (taggedMemorySizeInBytes) *taggedMemorySizeInBytes = pInfo->GetTaggedMemorySizeInBytes(); } } } } EX_END_CATCH_ALLOW_DATATARGET_MISSING_MEMORY; #endif // FEATURE_OBJCMARSHAL return hasTaggedMemory; } } HRESULT ClrDataAccess::IsTrackedType( CLRDATA_ADDRESS objAddr, BOOL *isTrackedType, BOOL *hasTaggedMemory) { if (objAddr == 0 || isTrackedType == NULL || hasTaggedMemory == NULL) { return E_INVALIDARG; } *isTrackedType = FALSE; *hasTaggedMemory = FALSE; SOSDacEnter(); TADDR mtTADDR = DACGetMethodTableFromObjectPointer(CLRDATA_ADDRESS_TO_TADDR(objAddr), m_pTarget); if (mtTADDR==NULL) hr = E_INVALIDARG; BOOL bFree = FALSE; MethodTable *mt = NULL; if (SUCCEEDED(hr)) { mt = PTR_MethodTable(mtTADDR); if (!DacValidateMethodTable(mt, bFree)) hr = E_INVALIDARG; } if (SUCCEEDED(hr)) { *isTrackedType = mt->IsTrackedReferenceWithFinalizer(); hr = *isTrackedType ? S_OK : S_FALSE; *hasTaggedMemory = TryReadTaggedMemoryState(objAddr, m_pTarget); } SOSDacLeave(); return hr; } HRESULT ClrDataAccess::GetTaggedMemory( CLRDATA_ADDRESS objAddr, CLRDATA_ADDRESS *taggedMemory, size_t *taggedMemorySizeInBytes) { if (objAddr == 0 || taggedMemory == NULL || taggedMemorySizeInBytes == NULL) { return E_INVALIDARG; } *taggedMemory = NULL; *taggedMemorySizeInBytes = 0; SOSDacEnter(); if (FALSE == TryReadTaggedMemoryState(objAddr, m_pTarget, taggedMemory, taggedMemorySizeInBytes)) { hr = S_FALSE; } SOSDacLeave(); return hr; }
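Several of the DAC methods above (GetJitHelperFunctionName, GetRCWInterfaces, and others) share a two-call size-negotiation convention: the caller first asks for the required length or element count via the pNeeded out-parameter, then calls again with a buffer of at least that size. The sketch below illustrates only that calling convention; the function name and returned string are hypothetical stand-ins, not actual DAC entry points.

#include <cstdio>
#include <cstring>
#include <vector>

// Hypothetical stand-in for a DAC-style "fill the caller's buffer" API:
// reports the required length (terminator included) through 'needed' and
// succeeds only when the supplied buffer is large enough.
static int GetHelperNameLike(char* name, unsigned int count, unsigned int* needed)
{
    const char* source = "JIT_SampleHelper";             // value the real API would look up
    unsigned int len = (unsigned int)strlen(source) + 1;
    if (needed != nullptr)
        *needed = len;
    if (name == nullptr)
        return 0;                                         // size query only
    if (count < len)
        return 1;                                         // buffer too small
    memcpy(name, source, len);
    return 0;
}

int main()
{
    unsigned int needed = 0;
    GetHelperNameLike(nullptr, 0, &needed);               // first call: learn the required size
    std::vector<char> buffer(needed);
    if (GetHelperNameLike(buffer.data(), needed, &needed) == 0)
        printf("%s\n", buffer.data());
    return 0;
}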
-1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
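The loop shape in question can be illustrated with a short, schematic example (hedged: the snippet below only shows the form of the induction-variable initialization; the change itself lives in the JIT's loop cloning phase, and all names here are hypothetical):

#include <vector>
#include <cstddef>

// The induction variable starts from a computed expression rather than a constant
// or a plain copy of a local. Previously this shape blocked cloning; with the change
// described above it remains a cloning candidate, with the cloning conditions
// checking the initial value against zero when necessary.
long SumFrom(const std::vector<int>& a, size_t x, size_t y)
{
    long sum = 0;
    size_t start = (x + y) / 2;                 // 'expression' initializer, not a constant or local
    for (size_t i = start; i < a.size(); i++)
        sum += a[i];
    return sum;
}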
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/native/corehost/hostpolicy/hostpolicy.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <atomic> #include <condition_variable> #include <mutex> #include <pal.h> #include "args.h" #include <trace.h> #include "deps_resolver.h" #include <fx_muxer.h> #include <utils.h> #include "coreclr.h" #include <error_codes.h> #include "breadcrumbs.h" #include <host_startup_info.h> #include <corehost_context_contract.h> #include <hostpolicy.h> #include "hostpolicy_context.h" #include "bundle/runner.h" namespace { // Initialization information set through corehost_load. All other entry points assume this has already // been set and use it to perform the requested operation. Note that this being initialized does not // indicate that the runtime is loaded or that the runtime will be loaded (e.g. host commands). std::mutex g_init_lock; bool g_init_done; hostpolicy_init_t g_init; // hostpolicy tracks the context used to load and initialize coreclr. This is the first context that // is successfully created and used to load the runtime. There can only be one hostpolicy context. std::mutex g_context_lock; // Tracks the hostpolicy context. This is the one and only hostpolicy context. It represents the information // that hostpolicy will use or has already used to load and initialize coreclr. It will be set once a context // is initialized and updated to hold coreclr once the runtime is loaded. std::shared_ptr<hostpolicy_context_t> g_context; // Tracks whether the hostpolicy context is initializing (from start of creation of the first context // to loading coreclr). It will be false before initialization starts and after it succeeds or fails. // Attempts to get/create a context should block if the first context is initializing (i.e. this is true). // The condition variable is used to block on and signal changes to this state. std::atomic<bool> g_context_initializing(false); std::condition_variable g_context_initializing_cv; int HOSTPOLICY_CALLTYPE create_coreclr() { int rc; { std::lock_guard<std::mutex> context_lock { g_context_lock }; if (g_context == nullptr) { trace::error(_X("Hostpolicy has not been initialized")); return StatusCode::HostInvalidState; } if (g_context->coreclr != nullptr) { trace::error(_X("CoreClr has already been loaded")); return StatusCode::HostInvalidState; } // Verbose logging if (trace::is_enabled()) g_context->coreclr_properties.log_properties(); std::vector<char> host_path; pal::pal_clrstring(g_context->host_path, &host_path); const char *app_domain_friendly_name = g_context->host_mode == host_mode_t::libhost ? 
"clr_libhost" : "clrhost"; // Create a CoreCLR instance trace::verbose(_X("CoreCLR path = '%s', CoreCLR dir = '%s'"), g_context->clr_path.c_str(), g_context->clr_dir.c_str()); auto hr = coreclr_t::create( g_context->clr_dir, host_path.data(), app_domain_friendly_name, g_context->coreclr_properties, g_context->coreclr); if (!SUCCEEDED(hr)) { trace::error(_X("Failed to create CoreCLR, HRESULT: 0x%X"), hr); rc = StatusCode::CoreClrInitFailure; } else { rc = StatusCode::Success; } g_context_initializing.store(false); } g_context_initializing_cv.notify_all(); return rc; } int create_hostpolicy_context( hostpolicy_init_t &hostpolicy_init, const int argc, const pal::char_t *argv[], bool breadcrumbs_enabled, /*out*/ arguments_t *out_args = nullptr) { { std::unique_lock<std::mutex> lock{ g_context_lock }; g_context_initializing_cv.wait(lock, [] { return !g_context_initializing.load(); }); const hostpolicy_context_t *existing_context = g_context.get(); if (existing_context != nullptr) { trace::info(_X("Host context has already been initialized")); assert(existing_context->coreclr != nullptr); return StatusCode::Success_HostAlreadyInitialized; } g_context_initializing.store(true); } g_context_initializing_cv.notify_all(); arguments_t args; if (!parse_arguments(hostpolicy_init, argc, argv, args)) return StatusCode::LibHostInvalidArgs; if (out_args != nullptr) *out_args = args; std::unique_ptr<hostpolicy_context_t> context_local(new hostpolicy_context_t()); int rc = context_local->initialize(hostpolicy_init, args, breadcrumbs_enabled); if (rc != StatusCode::Success) { { std::lock_guard<std::mutex> lock{ g_context_lock }; g_context_initializing.store(false); } g_context_initializing_cv.notify_all(); return rc; } { std::lock_guard<std::mutex> lock{ g_context_lock }; g_context.reset(context_local.release()); } return StatusCode::Success; } const std::shared_ptr<hostpolicy_context_t> get_hostpolicy_context(bool require_runtime) { std::lock_guard<std::mutex> lock{ g_context_lock }; const std::shared_ptr<hostpolicy_context_t> existing_context = g_context; if (existing_context == nullptr) { trace::error(_X("Hostpolicy context has not been created")); return nullptr; } if (require_runtime && existing_context->coreclr == nullptr) { trace::error(_X("Runtime has not been loaded and initialized")); return nullptr; } return existing_context; } } int run_host_command( hostpolicy_init_t &hostpolicy_init, const arguments_t &args, pal::string_t* out_host_command_result = nullptr) { assert(out_host_command_result != nullptr); // Breadcrumbs are not enabled for API calls because they do not execute // the app and may be re-entry hostpolicy_context_t context {}; int rc = context.initialize(hostpolicy_init, args, false /* enable_breadcrumbs */); if (rc != StatusCode::Success) return rc; // Check for host command(s) if (pal::strcasecmp(hostpolicy_init.host_command.c_str(), _X("get-native-search-directories")) == 0) { const pal::char_t *value; if (!context.coreclr_properties.try_get(common_property::NativeDllSearchDirectories, &value)) { trace::error(_X("get-native-search-directories failed to find NATIVE_DLL_SEARCH_DIRECTORIES property")); return StatusCode::HostApiFailed; } assert(out_host_command_result != nullptr); out_host_command_result->assign(value); return StatusCode::Success; } return StatusCode::InvalidArgFailure; } int run_app_for_context( const hostpolicy_context_t &context, int argc, const pal::char_t **argv) { assert(context.coreclr != nullptr); // Initialize clr strings for arguments 
std::vector<std::vector<char>> argv_strs(argc); std::vector<const char*> argv_local(argc); for (int i = 0; i < argc; i++) { pal::pal_clrstring(argv[i], &argv_strs[i]); argv_local[i] = argv_strs[i].data(); } if (trace::is_enabled()) { pal::string_t arg_str; for (size_t i = 0; i < argv_local.size(); i++) { pal::string_t cur; pal::clr_palstring(argv_local[i], &cur); arg_str.append(cur); arg_str.append(_X(",")); } trace::info(_X("Launch host: %s, app: %s, argc: %d, args: %s"), context.host_path.c_str(), context.application.c_str(), argc, arg_str.c_str()); } std::vector<char> managed_app; pal::pal_clrstring(context.application, &managed_app); // Leave breadcrumbs for servicing. std::shared_ptr<breadcrumb_writer_t> writer; if (!context.breadcrumbs.empty()) { writer = breadcrumb_writer_t::begin_write(context.breadcrumbs); assert(context.breadcrumbs.empty()); } // Previous hostpolicy trace messages must be printed before executing assembly trace::flush(); // Execute the application unsigned int exit_code; auto hr = context.coreclr->execute_assembly( (int32_t)argv_local.size(), argv_local.data(), managed_app.data(), &exit_code); if (!SUCCEEDED(hr)) { trace::error(_X("Failed to execute managed app, HRESULT: 0x%X"), hr); return StatusCode::CoreClrExeFailure; } trace::info(_X("Execute managed assembly exit code: 0x%X"), exit_code); // Shut down the CoreCLR hr = context.coreclr->shutdown(reinterpret_cast<int*>(&exit_code)); if (!SUCCEEDED(hr)) { trace::warning(_X("Failed to shut down CoreCLR, HRESULT: 0x%X"), hr); } if (writer) { writer->end_write(); } return exit_code; } int HOSTPOLICY_CALLTYPE run_app(const int argc, const pal::char_t *argv[]) { const std::shared_ptr<hostpolicy_context_t> context = get_hostpolicy_context(/*require_runtime*/ true); if (context == nullptr) return StatusCode::HostInvalidState; return run_app_for_context(*context, argc, argv); } void trace_hostpolicy_entrypoint_invocation(const pal::string_t& entryPointName) { trace::info(_X("--- Invoked hostpolicy [commit hash: %s] [%s,%s,%s][%s] %s = {"), _STRINGIFY(REPO_COMMIT_HASH), _STRINGIFY(HOST_POLICY_PKG_NAME), _STRINGIFY(HOST_POLICY_PKG_VER), _STRINGIFY(HOST_POLICY_PKG_REL_DIR), get_arch(), entryPointName.c_str()); } // // Loads and initilizes the hostpolicy. // // If hostpolicy is already initalized, the library will not be // reinitialized. // SHARED_API int HOSTPOLICY_CALLTYPE corehost_load(host_interface_t* init) { assert(init != nullptr); std::lock_guard<std::mutex> lock{ g_init_lock }; if (g_init_done) { // Since the host command is set during load _and_ // load is considered re-entrant due to how testing is // done, permit the re-initialization of the host command. 
hostpolicy_init_t::init_host_command(init, &g_init); return StatusCode::Success; } trace::setup(); g_init = hostpolicy_init_t{}; if (!hostpolicy_init_t::init(init, &g_init)) { g_init_done = false; return StatusCode::LibHostInitFailure; } g_init_done = true; return StatusCode::Success; } void trace_corehost_init( const hostpolicy_init_t &hostpolicy_init, const int argc, const pal::char_t* argv[], const pal::string_t& location) { if (trace::is_enabled()) { trace_hostpolicy_entrypoint_invocation(location); for (int i = 0; i < argc; ++i) { trace::info(_X("%s"), argv[i]); } trace::info(_X("}")); const pal::char_t *host_mode_str; switch (hostpolicy_init.host_mode) { case host_mode_t::muxer: host_mode_str = _X("muxer"); break; case host_mode_t::apphost: host_mode_str = _X("apphost"); break; case host_mode_t::split_fx: host_mode_str = _X("split_fx"); break; case host_mode_t::libhost: host_mode_str = _X("libhost"); break; case host_mode_t::invalid: default: host_mode_str = _X("invalid"); break; } trace::info(_X("Mode: %s"), host_mode_str); trace::info(_X("Deps file: %s"), hostpolicy_init.deps_file.c_str()); for (const auto& probe : hostpolicy_init.probe_paths) { trace::info(_X("Additional probe dir: %s"), probe.c_str()); } } } int corehost_main_init( hostpolicy_init_t& hostpolicy_init, const int argc, const pal::char_t* argv[], const pal::string_t& location) { // Take care of arguments if (!hostpolicy_init.host_info.is_valid(hostpolicy_init.host_mode)) { // For backwards compat (older hostfxr), default the host_info hostpolicy_init.host_info.parse(argc, argv); } if (bundle::info_t::is_single_file_bundle()) { const bundle::runner_t* bundle = bundle::runner_t::app(); StatusCode status = bundle->process_manifest_and_extract(); if (status != StatusCode::Success) { return status; } if (bundle->is_netcoreapp3_compat_mode()) { auto extracted_assembly = bundle->extraction_path(); auto app_name = hostpolicy_init.host_info.get_app_name() + _X(".dll"); append_path(&extracted_assembly, app_name.c_str()); assert(pal::file_exists(extracted_assembly)); hostpolicy_init.host_info.app_path = extracted_assembly; } } trace_corehost_init(hostpolicy_init, argc, argv, location); return StatusCode::Success; } SHARED_API int HOSTPOLICY_CALLTYPE corehost_main(const int argc, const pal::char_t* argv[]) { int rc = corehost_main_init(g_init, argc, argv, _X("corehost_main")); if (rc != StatusCode::Success) return rc; arguments_t args; assert(g_context == nullptr); rc = create_hostpolicy_context(g_init, argc, argv, true /* breadcrumbs_enabled */, &args); if (rc != StatusCode::Success) return rc; rc = create_coreclr(); if (rc != StatusCode::Success) return rc; return run_app(args.app_argc, args.app_argv); } SHARED_API int HOSTPOLICY_CALLTYPE corehost_main_with_output_buffer(const int argc, const pal::char_t* argv[], pal::char_t buffer[], int32_t buffer_size, int32_t* required_buffer_size) { int rc = corehost_main_init(g_init, argc, argv, _X("corehost_main_with_output_buffer")); if (rc != StatusCode::Success) return rc; if (g_init.host_command == _X("get-native-search-directories")) { arguments_t args; if (!parse_arguments(g_init, argc, argv, args)) return StatusCode::LibHostInvalidArgs; pal::string_t output_string; rc = run_host_command(g_init, args, &output_string); if (rc != StatusCode::Success) return rc; // Get length in character count not including null terminator int32_t len = static_cast<int32_t>(output_string.length()); if (len + 1 > buffer_size) { rc = StatusCode::HostApiBufferTooSmall; *required_buffer_size = len + 1; 
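// The caller's buffer is too small; the required size (terminator included) was reported above so the caller can retry with a larger buffer.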
trace::info(_X("get-native-search-directories failed with buffer too small"), output_string.c_str()); } else { output_string.copy(buffer, len); buffer[len] = '\0'; *required_buffer_size = 0; trace::info(_X("get-native-search-directories success: %s"), output_string.c_str()); } } else { trace::error(_X("Unknown command: %s"), g_init.host_command.c_str()); rc = StatusCode::LibHostUnknownCommand; } return rc; } void trace_corehost_libhost_init(const hostpolicy_init_t &hostpolicy_init, const pal::string_t& location) { // Host info should always be valid in the delegate scenario assert(hostpolicy_init.host_info.is_valid(host_mode_t::libhost)); // Single-file bundle is only expected in apphost mode. assert(!bundle::info_t::is_single_file_bundle()); trace_corehost_init(hostpolicy_init, 0, nullptr, location); } namespace { int HOSTPOLICY_CALLTYPE get_delegate(coreclr_delegate_type type, void **delegate) { if (delegate == nullptr) return StatusCode::InvalidArgFailure; const std::shared_ptr<hostpolicy_context_t> context = get_hostpolicy_context(/*require_runtime*/ true); if (context == nullptr) return StatusCode::HostInvalidState; coreclr_t *coreclr = context->coreclr.get(); switch (type) { case coreclr_delegate_type::com_activation: return coreclr->create_delegate( "System.Private.CoreLib", "Internal.Runtime.InteropServices.ComActivator", "GetClassFactoryForTypeInternal", delegate); case coreclr_delegate_type::load_in_memory_assembly: return coreclr->create_delegate( "System.Private.CoreLib", "Internal.Runtime.InteropServices.InMemoryAssemblyLoader", "LoadInMemoryAssembly", delegate); case coreclr_delegate_type::winrt_activation: return StatusCode::InvalidArgFailure; case coreclr_delegate_type::com_register: return coreclr->create_delegate( "System.Private.CoreLib", "Internal.Runtime.InteropServices.ComActivator", "RegisterClassForTypeInternal", delegate); case coreclr_delegate_type::com_unregister: return coreclr->create_delegate( "System.Private.CoreLib", "Internal.Runtime.InteropServices.ComActivator", "UnregisterClassForTypeInternal", delegate); case coreclr_delegate_type::load_assembly_and_get_function_pointer: return coreclr->create_delegate( "System.Private.CoreLib", "Internal.Runtime.InteropServices.ComponentActivator", "LoadAssemblyAndGetFunctionPointer", delegate); case coreclr_delegate_type::get_function_pointer: return coreclr->create_delegate( "System.Private.CoreLib", "Internal.Runtime.InteropServices.ComponentActivator", "GetFunctionPointer", delegate); default: return StatusCode::LibHostInvalidArgs; } } int HOSTPOLICY_CALLTYPE get_property(const pal::char_t *key, const pal::char_t **value) { if (key == nullptr) return StatusCode::InvalidArgFailure; const std::shared_ptr<hostpolicy_context_t> context = get_hostpolicy_context(/*require_runtime*/ false); if (context == nullptr) return StatusCode::HostInvalidState; if (!context->coreclr_properties.try_get(key, value)) return StatusCode::HostPropertyNotFound; return StatusCode::Success; } int HOSTPOLICY_CALLTYPE set_property(const pal::char_t *key, const pal::char_t *value) { if (key == nullptr) return StatusCode::InvalidArgFailure; std::lock_guard<std::mutex> lock{ g_context_lock }; if (g_context == nullptr || g_context->coreclr != nullptr) { trace::error(_X("Setting properties is only allowed before runtime has been loaded and initialized")); return HostInvalidState; } if (value != nullptr) { g_context->coreclr_properties.add(key, value); } else { g_context->coreclr_properties.remove(key); } return StatusCode::Success; } int 
HOSTPOLICY_CALLTYPE get_properties(size_t * count, const pal::char_t **keys, const pal::char_t **values) { if (count == nullptr) return StatusCode::InvalidArgFailure; const std::shared_ptr<hostpolicy_context_t> context = get_hostpolicy_context(/*require_runtime*/ false); if (context == nullptr) return StatusCode::HostInvalidState; size_t actualCount = context->coreclr_properties.count(); size_t input_count = *count; *count = actualCount; if (input_count < actualCount || keys == nullptr || values == nullptr) return StatusCode::HostApiBufferTooSmall; int index = 0; std::function<void (const pal::string_t &,const pal::string_t &)> callback = [&] (const pal::string_t& key, const pal::string_t& value) { keys[index] = key.data(); values[index] = value.data(); ++index; }; context->coreclr_properties.enumerate(callback); return StatusCode::Success; } bool matches_existing_properties(const coreclr_property_bag_t &properties, const corehost_initialize_request_t *init_request) { bool hasDifferentProperties = false; size_t len = init_request->config_keys.len; for (size_t i = 0; i < len; ++i) { const pal::char_t *key = init_request->config_keys.arr[i]; const pal::char_t *value = init_request->config_values.arr[i]; const pal::char_t *existingValue; if (properties.try_get(key, &existingValue)) { if (pal::strcmp(existingValue, value) != 0) { trace::warning(_X("The property [%s] has a different value [%s] from that in the previously loaded runtime [%s]"), key, value, existingValue); hasDifferentProperties = true; } } else { trace::warning(_X("The property [%s] is not present in the previously loaded runtime."), key); hasDifferentProperties = true; } } if (len > 0 && !hasDifferentProperties) trace::info(_X("All specified properties match those in the previously loaded runtime")); return !hasDifferentProperties; } } // Initializes hostpolicy. Calculates everything required to start the runtime and creates a context to track // that information // // Parameters: // init_request // struct containing information about the initialization request. If hostpolicy is not yet initialized, // this is expected to be nullptr. If hostpolicy is already initialized, this should not be nullptr and // this function will use the struct to check for compatibility with the way in which hostpolicy was // previously initialized. // options // initialization options // context_contract // [out] if initialization is successful, populated with a contract for performing operations on hostpolicy // // Return value: // Success - Initialization was succesful // Success_HostAlreadyInitialized - Request is compatible with already initialized hostpolicy // Success_DifferentRuntimeProperties - Request has runtime properties that differ from already initialized hostpolicy // // This function does not load the runtime // // If a previous request to initialize hostpolicy was made, but the runtime was not yet loaded, this function will // block until the runtime is loaded. // // This function assumes corehost_load has already been called. It uses the init information set through that // call - not the struct passed into this function - to create a context. // // Both Success_HostAlreadyInitialized and Success_DifferentRuntimeProperties codes are considered successful // initializations. In the case of Success_DifferentRuntimeProperties, it is left to the consumer to verify that // the difference in properties is acceptable. 
// SHARED_API int HOSTPOLICY_CALLTYPE corehost_initialize(const corehost_initialize_request_t *init_request, uint32_t options, /*out*/ corehost_context_contract *context_contract) { if (context_contract == nullptr) return StatusCode::InvalidArgFailure; bool version_set = (options & initialization_options_t::context_contract_version_set) != 0; bool wait_for_initialized = (options & initialization_options_t::wait_for_initialized) != 0; bool get_contract = (options & initialization_options_t::get_contract) != 0; if (wait_for_initialized && get_contract) { trace::error(_X("Specifying both initialization options for wait_for_initialized and get_contract is not allowed")); return StatusCode::InvalidArgFailure; } if (get_contract) { if (init_request != nullptr) { trace::error(_X("Initialization request is expected to be null when getting the already initialized contract")); return StatusCode::InvalidArgFailure; } } else { std::unique_lock<std::mutex> lock { g_context_lock }; bool already_initializing = g_context_initializing.load(); bool already_initialized = g_context.get() != nullptr; if (wait_for_initialized) { trace::verbose(_X("Initialization option to wait for initialize request is set")); if (init_request == nullptr) { trace::error(_X("Initialization request is expected to be non-null when waiting for initialize request option is set")); return StatusCode::InvalidArgFailure; } // If we are not already initializing or done initializing, wait until another context initialization has started if (!already_initialized && !already_initializing) { trace::info(_X("Waiting for another request to initialize hostpolicy")); g_context_initializing_cv.wait(lock, [&] { return g_context_initializing.load(); }); } } else { if (init_request != nullptr && !already_initialized && !already_initializing) { trace::error(_X("Initialization request is expected to be null for the first initialization request")); return StatusCode::InvalidArgFailure; } if (init_request == nullptr && (already_initializing || already_initialized)) { trace::error(_X("Initialization request is expected to be non-null for requests other than the first one")); return StatusCode::InvalidArgFailure; } } } // Trace entry point information using previously set init information. // This function does not modify any global state. 
trace_corehost_libhost_init(g_init, _X("corehost_initialize")); int rc; if (wait_for_initialized) { // Wait for context initialization to complete std::unique_lock<std::mutex> lock{ g_context_lock }; g_context_initializing_cv.wait(lock, [] { return !g_context_initializing.load(); }); const hostpolicy_context_t *existing_context = g_context.get(); if (existing_context == nullptr || existing_context->coreclr == nullptr) { trace::info(_X("Option to wait for initialize request was set, but that request did not result in initialization")); return StatusCode::HostInvalidState; } rc = StatusCode::Success_HostAlreadyInitialized; } else if (get_contract) { const std::shared_ptr<hostpolicy_context_t> context = get_hostpolicy_context(/*require_runtime*/ true); if (context == nullptr) { trace::error(_X("Option to get the contract for the initialized hostpolicy was set, but hostpolicy has not been initialized")); return StatusCode::HostInvalidState; } rc = StatusCode::Success; } else { rc = create_hostpolicy_context(g_init, 0 /*argc*/, nullptr /*argv*/, g_init.host_mode != host_mode_t::libhost); if (rc != StatusCode::Success && rc != StatusCode::Success_HostAlreadyInitialized) return rc; } if (rc == StatusCode::Success_HostAlreadyInitialized) { assert(init_request != nullptr && init_request->version >= offsetof(corehost_initialize_request_t, config_values) + sizeof(init_request->config_values) && init_request->config_keys.len == init_request->config_values.len); const std::shared_ptr<hostpolicy_context_t> context = get_hostpolicy_context(/*require_runtime*/ true); if (context == nullptr) return StatusCode::HostInvalidState; // Compare the current context with this request (properties) if (!matches_existing_properties(context->coreclr_properties, init_request)) rc = StatusCode::Success_DifferentRuntimeProperties; } // If version wasn't set, then it would have the original size of corehost_context_contract, which is 7 * sizeof(size_t). size_t version_lo = version_set ? context_contract->version : 7 * sizeof(size_t); context_contract->version = sizeof(corehost_context_contract); context_contract->get_property_value = get_property; context_contract->set_property_value = set_property; context_contract->get_properties = get_properties; context_contract->load_runtime = create_coreclr; context_contract->run_app = run_app; context_contract->get_runtime_delegate = get_delegate; // An old hostfxr may not have provided enough space for these fields. // The version_lo (sizeof) the old hostfxr saw at build time will be // smaller and we should not attempt to write the fields in that case. 
if (version_lo >= offsetof(corehost_context_contract, last_known_delegate_type) + sizeof(context_contract->last_known_delegate_type)) { context_contract->last_known_delegate_type = (size_t)coreclr_delegate_type::__last - 1; } return rc; } SHARED_API int HOSTPOLICY_CALLTYPE corehost_unload() { { std::lock_guard<std::mutex> lock{ g_context_lock }; if (g_context != nullptr && g_context->coreclr != nullptr) return StatusCode::Success; // Allow re-initializing if runtime has not been loaded g_context.reset(); g_context_initializing.store(false); } g_context_initializing_cv.notify_all(); std::lock_guard<std::mutex> init_lock{ g_init_lock }; g_init_done = false; return StatusCode::Success; } SHARED_API int HOSTPOLICY_CALLTYPE corehost_resolve_component_dependencies( const pal::char_t *component_main_assembly_path, corehost_resolve_component_dependencies_result_fn result) { if (trace::is_enabled()) { trace_hostpolicy_entrypoint_invocation(_X("corehost_resolve_component_dependencies")); trace::info(_X(" Component main assembly path: %s"), component_main_assembly_path); trace::info(_X("}")); for (const auto& probe : g_init.probe_paths) { trace::info(_X("Additional probe dir: %s"), probe.c_str()); } } // IMPORTANT: g_init is static/global and thus potentially accessed from multiple threads // We must only use it as read-only here (unlike the run scenarios which own it). // For example the frameworks in g_init.fx_definitions can't be used "as-is" by the resolver // right now as it would try to re-parse the .deps.json and thus modify the objects. // The assumption is that component dependency resolution will only be called // when the coreclr is hosted through this hostpolicy and thus it will // have already called corehost_main_init. if (!g_init.host_info.is_valid(g_init.host_mode)) { trace::error(_X("Hostpolicy must be initialized and corehost_main must have been called before calling corehost_resolve_component_dependencies.")); return StatusCode::CoreHostLibLoadFailure; } // If the current host mode is libhost, use apphost instead. host_mode_t host_mode = g_init.host_mode == host_mode_t::libhost ? host_mode_t::apphost : g_init.host_mode; // Initialize arguments (basically the structure describing the input app/component to resolve) arguments_t args; if (!init_arguments( component_main_assembly_path, g_init.host_info, g_init.tfm, host_mode, /* additional_deps_serialized */ pal::string_t(), // Additional deps - don't use those from the app, they're already in the app /* deps_file */ pal::string_t(), // Avoid using any other deps file than the one next to the component g_init.probe_paths, /* init_from_file_system */ true, args)) { return StatusCode::LibHostInvalidArgs; } args.trace(); // Initialize the "app" framework definition. auto app = new fx_definition_t(); // For now intentionally don't process .runtimeconfig.json since we don't perform framework resolution. // Call parse_runtime_config since it initializes the defaults for various settings // but we don't have any .runtimeconfig.json for the component, so pass in empty paths. // Empty paths is a valid case and the method will simply skip parsing anything. app->parse_runtime_config(pal::string_t(), pal::string_t(), runtime_config_t::settings_t()); if (!app->get_runtime_config().is_valid()) { // This should really never happen, but fail gracefully if it does anyway. 
assert(false); delete app; app = nullptr; trace::error(_X("Failed to initialize empty runtime config for the component.")); return StatusCode::InvalidConfigFile; } // For components we don't want to resolve anything from the frameworks, since those will be supplied by the app. // So only use the component as the "app" framework. fx_definition_vector_t component_fx_definitions; component_fx_definitions.push_back(std::unique_ptr<fx_definition_t>(app)); // TODO Review: Since we're only passing the one component framework, the resolver will not consider // frameworks from the app for probing paths. So potential references to paths inside frameworks will not resolve. // The RID graph still has to come from the actuall root framework, so take that from the g_init.fx_definitions // which are the frameworks for the app. deps_resolver_t resolver( args, component_fx_definitions, &get_root_framework(g_init.fx_definitions).get_deps().get_rid_fallback_graph(), true); pal::string_t resolver_errors; if (!resolver.valid(&resolver_errors)) { trace::error(_X("Error initializing the dependency resolver: %s"), resolver_errors.c_str()); return StatusCode::ResolverInitFailure; } // Don't write breadcrumbs since we're not executing the app, just resolving dependencies // doesn't guarantee that they will actually execute. probe_paths_t probe_paths; if (!resolver.resolve_probe_paths(&probe_paths, nullptr, /* ignore_missing_assemblies */ true)) { return StatusCode::ResolverResolveFailure; } if (trace::is_enabled()) { trace::info(_X("corehost_resolve_component_dependencies results: {")); trace::info(_X(" assembly_paths: '%s'"), probe_paths.tpa.data()); trace::info(_X(" native_search_paths: '%s'"), probe_paths.native.data()); trace::info(_X(" resource_search_paths: '%s'"), probe_paths.resources.data()); trace::info(_X("}")); } result( probe_paths.tpa.data(), probe_paths.native.data(), probe_paths.resources.data()); return 0; } // // Sets a callback which is to be used to write errors to. // // Parameters: // error_writer // A callback function which will be invoked every time an error is to be reported. // Or nullptr to unregister previously registered callback and return to the default behavior. // Return value: // The previously registered callback (which is now unregistered), or nullptr if no previous callback // was registered // // The error writer is registered per-thread, so the registration is thread-local. On each thread // only one callback can be registered. Subsequent registrations overwrite the previous ones. // // By default no callback is registered in which case the errors are written to stderr. // // Each call to the error writer is sort of like writing a single line (the EOL character is omitted). // Multiple calls to the error writer may occure for one failure. // SHARED_API corehost_error_writer_fn HOSTPOLICY_CALLTYPE corehost_set_error_writer(corehost_error_writer_fn error_writer) { return trace::set_error_writer(error_writer); }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <atomic> #include <condition_variable> #include <mutex> #include <pal.h> #include "args.h" #include <trace.h> #include "deps_resolver.h" #include <fx_muxer.h> #include <utils.h> #include "coreclr.h" #include <error_codes.h> #include "breadcrumbs.h" #include <host_startup_info.h> #include <corehost_context_contract.h> #include <hostpolicy.h> #include "hostpolicy_context.h" #include "bundle/runner.h" namespace { // Initialization information set through corehost_load. All other entry points assume this has already // been set and use it to perform the requested operation. Note that this being initialized does not // indicate that the runtime is loaded or that the runtime will be loaded (e.g. host commands). std::mutex g_init_lock; bool g_init_done; hostpolicy_init_t g_init; // hostpolicy tracks the context used to load and initialize coreclr. This is the first context that // is successfully created and used to load the runtime. There can only be one hostpolicy context. std::mutex g_context_lock; // Tracks the hostpolicy context. This is the one and only hostpolicy context. It represents the information // that hostpolicy will use or has already used to load and initialize coreclr. It will be set once a context // is initialized and updated to hold coreclr once the runtime is loaded. std::shared_ptr<hostpolicy_context_t> g_context; // Tracks whether the hostpolicy context is initializing (from start of creation of the first context // to loading coreclr). It will be false before initialization starts and after it succeeds or fails. // Attempts to get/create a context should block if the first context is initializing (i.e. this is true). // The condition variable is used to block on and signal changes to this state. std::atomic<bool> g_context_initializing(false); std::condition_variable g_context_initializing_cv; int HOSTPOLICY_CALLTYPE create_coreclr() { int rc; { std::lock_guard<std::mutex> context_lock { g_context_lock }; if (g_context == nullptr) { trace::error(_X("Hostpolicy has not been initialized")); return StatusCode::HostInvalidState; } if (g_context->coreclr != nullptr) { trace::error(_X("CoreClr has already been loaded")); return StatusCode::HostInvalidState; } // Verbose logging if (trace::is_enabled()) g_context->coreclr_properties.log_properties(); std::vector<char> host_path; pal::pal_clrstring(g_context->host_path, &host_path); const char *app_domain_friendly_name = g_context->host_mode == host_mode_t::libhost ? 
"clr_libhost" : "clrhost"; // Create a CoreCLR instance trace::verbose(_X("CoreCLR path = '%s', CoreCLR dir = '%s'"), g_context->clr_path.c_str(), g_context->clr_dir.c_str()); auto hr = coreclr_t::create( g_context->clr_dir, host_path.data(), app_domain_friendly_name, g_context->coreclr_properties, g_context->coreclr); if (!SUCCEEDED(hr)) { trace::error(_X("Failed to create CoreCLR, HRESULT: 0x%X"), hr); rc = StatusCode::CoreClrInitFailure; } else { rc = StatusCode::Success; } g_context_initializing.store(false); } g_context_initializing_cv.notify_all(); return rc; } int create_hostpolicy_context( hostpolicy_init_t &hostpolicy_init, const int argc, const pal::char_t *argv[], bool breadcrumbs_enabled, /*out*/ arguments_t *out_args = nullptr) { { std::unique_lock<std::mutex> lock{ g_context_lock }; g_context_initializing_cv.wait(lock, [] { return !g_context_initializing.load(); }); const hostpolicy_context_t *existing_context = g_context.get(); if (existing_context != nullptr) { trace::info(_X("Host context has already been initialized")); assert(existing_context->coreclr != nullptr); return StatusCode::Success_HostAlreadyInitialized; } g_context_initializing.store(true); } g_context_initializing_cv.notify_all(); arguments_t args; if (!parse_arguments(hostpolicy_init, argc, argv, args)) return StatusCode::LibHostInvalidArgs; if (out_args != nullptr) *out_args = args; std::unique_ptr<hostpolicy_context_t> context_local(new hostpolicy_context_t()); int rc = context_local->initialize(hostpolicy_init, args, breadcrumbs_enabled); if (rc != StatusCode::Success) { { std::lock_guard<std::mutex> lock{ g_context_lock }; g_context_initializing.store(false); } g_context_initializing_cv.notify_all(); return rc; } { std::lock_guard<std::mutex> lock{ g_context_lock }; g_context.reset(context_local.release()); } return StatusCode::Success; } const std::shared_ptr<hostpolicy_context_t> get_hostpolicy_context(bool require_runtime) { std::lock_guard<std::mutex> lock{ g_context_lock }; const std::shared_ptr<hostpolicy_context_t> existing_context = g_context; if (existing_context == nullptr) { trace::error(_X("Hostpolicy context has not been created")); return nullptr; } if (require_runtime && existing_context->coreclr == nullptr) { trace::error(_X("Runtime has not been loaded and initialized")); return nullptr; } return existing_context; } } int run_host_command( hostpolicy_init_t &hostpolicy_init, const arguments_t &args, pal::string_t* out_host_command_result = nullptr) { assert(out_host_command_result != nullptr); // Breadcrumbs are not enabled for API calls because they do not execute // the app and may be re-entry hostpolicy_context_t context {}; int rc = context.initialize(hostpolicy_init, args, false /* enable_breadcrumbs */); if (rc != StatusCode::Success) return rc; // Check for host command(s) if (pal::strcasecmp(hostpolicy_init.host_command.c_str(), _X("get-native-search-directories")) == 0) { const pal::char_t *value; if (!context.coreclr_properties.try_get(common_property::NativeDllSearchDirectories, &value)) { trace::error(_X("get-native-search-directories failed to find NATIVE_DLL_SEARCH_DIRECTORIES property")); return StatusCode::HostApiFailed; } assert(out_host_command_result != nullptr); out_host_command_result->assign(value); return StatusCode::Success; } return StatusCode::InvalidArgFailure; } int run_app_for_context( const hostpolicy_context_t &context, int argc, const pal::char_t **argv) { assert(context.coreclr != nullptr); // Initialize clr strings for arguments 
std::vector<std::vector<char>> argv_strs(argc); std::vector<const char*> argv_local(argc); for (int i = 0; i < argc; i++) { pal::pal_clrstring(argv[i], &argv_strs[i]); argv_local[i] = argv_strs[i].data(); } if (trace::is_enabled()) { pal::string_t arg_str; for (size_t i = 0; i < argv_local.size(); i++) { pal::string_t cur; pal::clr_palstring(argv_local[i], &cur); arg_str.append(cur); arg_str.append(_X(",")); } trace::info(_X("Launch host: %s, app: %s, argc: %d, args: %s"), context.host_path.c_str(), context.application.c_str(), argc, arg_str.c_str()); } std::vector<char> managed_app; pal::pal_clrstring(context.application, &managed_app); // Leave breadcrumbs for servicing. std::shared_ptr<breadcrumb_writer_t> writer; if (!context.breadcrumbs.empty()) { writer = breadcrumb_writer_t::begin_write(context.breadcrumbs); assert(context.breadcrumbs.empty()); } // Previous hostpolicy trace messages must be printed before executing assembly trace::flush(); // Execute the application unsigned int exit_code; auto hr = context.coreclr->execute_assembly( (int32_t)argv_local.size(), argv_local.data(), managed_app.data(), &exit_code); if (!SUCCEEDED(hr)) { trace::error(_X("Failed to execute managed app, HRESULT: 0x%X"), hr); return StatusCode::CoreClrExeFailure; } trace::info(_X("Execute managed assembly exit code: 0x%X"), exit_code); // Shut down the CoreCLR hr = context.coreclr->shutdown(reinterpret_cast<int*>(&exit_code)); if (!SUCCEEDED(hr)) { trace::warning(_X("Failed to shut down CoreCLR, HRESULT: 0x%X"), hr); } if (writer) { writer->end_write(); } return exit_code; } int HOSTPOLICY_CALLTYPE run_app(const int argc, const pal::char_t *argv[]) { const std::shared_ptr<hostpolicy_context_t> context = get_hostpolicy_context(/*require_runtime*/ true); if (context == nullptr) return StatusCode::HostInvalidState; return run_app_for_context(*context, argc, argv); } void trace_hostpolicy_entrypoint_invocation(const pal::string_t& entryPointName) { trace::info(_X("--- Invoked hostpolicy [commit hash: %s] [%s,%s,%s][%s] %s = {"), _STRINGIFY(REPO_COMMIT_HASH), _STRINGIFY(HOST_POLICY_PKG_NAME), _STRINGIFY(HOST_POLICY_PKG_VER), _STRINGIFY(HOST_POLICY_PKG_REL_DIR), get_arch(), entryPointName.c_str()); } // // Loads and initilizes the hostpolicy. // // If hostpolicy is already initalized, the library will not be // reinitialized. // SHARED_API int HOSTPOLICY_CALLTYPE corehost_load(host_interface_t* init) { assert(init != nullptr); std::lock_guard<std::mutex> lock{ g_init_lock }; if (g_init_done) { // Since the host command is set during load _and_ // load is considered re-entrant due to how testing is // done, permit the re-initialization of the host command. 
hostpolicy_init_t::init_host_command(init, &g_init); return StatusCode::Success; } trace::setup(); g_init = hostpolicy_init_t{}; if (!hostpolicy_init_t::init(init, &g_init)) { g_init_done = false; return StatusCode::LibHostInitFailure; } g_init_done = true; return StatusCode::Success; } void trace_corehost_init( const hostpolicy_init_t &hostpolicy_init, const int argc, const pal::char_t* argv[], const pal::string_t& location) { if (trace::is_enabled()) { trace_hostpolicy_entrypoint_invocation(location); for (int i = 0; i < argc; ++i) { trace::info(_X("%s"), argv[i]); } trace::info(_X("}")); const pal::char_t *host_mode_str; switch (hostpolicy_init.host_mode) { case host_mode_t::muxer: host_mode_str = _X("muxer"); break; case host_mode_t::apphost: host_mode_str = _X("apphost"); break; case host_mode_t::split_fx: host_mode_str = _X("split_fx"); break; case host_mode_t::libhost: host_mode_str = _X("libhost"); break; case host_mode_t::invalid: default: host_mode_str = _X("invalid"); break; } trace::info(_X("Mode: %s"), host_mode_str); trace::info(_X("Deps file: %s"), hostpolicy_init.deps_file.c_str()); for (const auto& probe : hostpolicy_init.probe_paths) { trace::info(_X("Additional probe dir: %s"), probe.c_str()); } } } int corehost_main_init( hostpolicy_init_t& hostpolicy_init, const int argc, const pal::char_t* argv[], const pal::string_t& location) { // Take care of arguments if (!hostpolicy_init.host_info.is_valid(hostpolicy_init.host_mode)) { // For backwards compat (older hostfxr), default the host_info hostpolicy_init.host_info.parse(argc, argv); } if (bundle::info_t::is_single_file_bundle()) { const bundle::runner_t* bundle = bundle::runner_t::app(); StatusCode status = bundle->process_manifest_and_extract(); if (status != StatusCode::Success) { return status; } if (bundle->is_netcoreapp3_compat_mode()) { auto extracted_assembly = bundle->extraction_path(); auto app_name = hostpolicy_init.host_info.get_app_name() + _X(".dll"); append_path(&extracted_assembly, app_name.c_str()); assert(pal::file_exists(extracted_assembly)); hostpolicy_init.host_info.app_path = extracted_assembly; } } trace_corehost_init(hostpolicy_init, argc, argv, location); return StatusCode::Success; } SHARED_API int HOSTPOLICY_CALLTYPE corehost_main(const int argc, const pal::char_t* argv[]) { int rc = corehost_main_init(g_init, argc, argv, _X("corehost_main")); if (rc != StatusCode::Success) return rc; arguments_t args; assert(g_context == nullptr); rc = create_hostpolicy_context(g_init, argc, argv, true /* breadcrumbs_enabled */, &args); if (rc != StatusCode::Success) return rc; rc = create_coreclr(); if (rc != StatusCode::Success) return rc; return run_app(args.app_argc, args.app_argv); } SHARED_API int HOSTPOLICY_CALLTYPE corehost_main_with_output_buffer(const int argc, const pal::char_t* argv[], pal::char_t buffer[], int32_t buffer_size, int32_t* required_buffer_size) { int rc = corehost_main_init(g_init, argc, argv, _X("corehost_main_with_output_buffer")); if (rc != StatusCode::Success) return rc; if (g_init.host_command == _X("get-native-search-directories")) { arguments_t args; if (!parse_arguments(g_init, argc, argv, args)) return StatusCode::LibHostInvalidArgs; pal::string_t output_string; rc = run_host_command(g_init, args, &output_string); if (rc != StatusCode::Success) return rc; // Get length in character count not including null terminator int32_t len = static_cast<int32_t>(output_string.length()); if (len + 1 > buffer_size) { rc = StatusCode::HostApiBufferTooSmall; *required_buffer_size = len + 1; 
trace::info(_X("get-native-search-directories failed with buffer too small"), output_string.c_str()); } else { output_string.copy(buffer, len); buffer[len] = '\0'; *required_buffer_size = 0; trace::info(_X("get-native-search-directories success: %s"), output_string.c_str()); } } else { trace::error(_X("Unknown command: %s"), g_init.host_command.c_str()); rc = StatusCode::LibHostUnknownCommand; } return rc; } void trace_corehost_libhost_init(const hostpolicy_init_t &hostpolicy_init, const pal::string_t& location) { // Host info should always be valid in the delegate scenario assert(hostpolicy_init.host_info.is_valid(host_mode_t::libhost)); // Single-file bundle is only expected in apphost mode. assert(!bundle::info_t::is_single_file_bundle()); trace_corehost_init(hostpolicy_init, 0, nullptr, location); } namespace { int HOSTPOLICY_CALLTYPE get_delegate(coreclr_delegate_type type, void **delegate) { if (delegate == nullptr) return StatusCode::InvalidArgFailure; const std::shared_ptr<hostpolicy_context_t> context = get_hostpolicy_context(/*require_runtime*/ true); if (context == nullptr) return StatusCode::HostInvalidState; coreclr_t *coreclr = context->coreclr.get(); switch (type) { case coreclr_delegate_type::com_activation: return coreclr->create_delegate( "System.Private.CoreLib", "Internal.Runtime.InteropServices.ComActivator", "GetClassFactoryForTypeInternal", delegate); case coreclr_delegate_type::load_in_memory_assembly: return coreclr->create_delegate( "System.Private.CoreLib", "Internal.Runtime.InteropServices.InMemoryAssemblyLoader", "LoadInMemoryAssembly", delegate); case coreclr_delegate_type::winrt_activation: return StatusCode::InvalidArgFailure; case coreclr_delegate_type::com_register: return coreclr->create_delegate( "System.Private.CoreLib", "Internal.Runtime.InteropServices.ComActivator", "RegisterClassForTypeInternal", delegate); case coreclr_delegate_type::com_unregister: return coreclr->create_delegate( "System.Private.CoreLib", "Internal.Runtime.InteropServices.ComActivator", "UnregisterClassForTypeInternal", delegate); case coreclr_delegate_type::load_assembly_and_get_function_pointer: return coreclr->create_delegate( "System.Private.CoreLib", "Internal.Runtime.InteropServices.ComponentActivator", "LoadAssemblyAndGetFunctionPointer", delegate); case coreclr_delegate_type::get_function_pointer: return coreclr->create_delegate( "System.Private.CoreLib", "Internal.Runtime.InteropServices.ComponentActivator", "GetFunctionPointer", delegate); default: return StatusCode::LibHostInvalidArgs; } } int HOSTPOLICY_CALLTYPE get_property(const pal::char_t *key, const pal::char_t **value) { if (key == nullptr) return StatusCode::InvalidArgFailure; const std::shared_ptr<hostpolicy_context_t> context = get_hostpolicy_context(/*require_runtime*/ false); if (context == nullptr) return StatusCode::HostInvalidState; if (!context->coreclr_properties.try_get(key, value)) return StatusCode::HostPropertyNotFound; return StatusCode::Success; } int HOSTPOLICY_CALLTYPE set_property(const pal::char_t *key, const pal::char_t *value) { if (key == nullptr) return StatusCode::InvalidArgFailure; std::lock_guard<std::mutex> lock{ g_context_lock }; if (g_context == nullptr || g_context->coreclr != nullptr) { trace::error(_X("Setting properties is only allowed before runtime has been loaded and initialized")); return HostInvalidState; } if (value != nullptr) { g_context->coreclr_properties.add(key, value); } else { g_context->coreclr_properties.remove(key); } return StatusCode::Success; } int 
HOSTPOLICY_CALLTYPE get_properties(size_t * count, const pal::char_t **keys, const pal::char_t **values) { if (count == nullptr) return StatusCode::InvalidArgFailure; const std::shared_ptr<hostpolicy_context_t> context = get_hostpolicy_context(/*require_runtime*/ false); if (context == nullptr) return StatusCode::HostInvalidState; size_t actualCount = context->coreclr_properties.count(); size_t input_count = *count; *count = actualCount; if (input_count < actualCount || keys == nullptr || values == nullptr) return StatusCode::HostApiBufferTooSmall; int index = 0; std::function<void (const pal::string_t &,const pal::string_t &)> callback = [&] (const pal::string_t& key, const pal::string_t& value) { keys[index] = key.data(); values[index] = value.data(); ++index; }; context->coreclr_properties.enumerate(callback); return StatusCode::Success; } bool matches_existing_properties(const coreclr_property_bag_t &properties, const corehost_initialize_request_t *init_request) { bool hasDifferentProperties = false; size_t len = init_request->config_keys.len; for (size_t i = 0; i < len; ++i) { const pal::char_t *key = init_request->config_keys.arr[i]; const pal::char_t *value = init_request->config_values.arr[i]; const pal::char_t *existingValue; if (properties.try_get(key, &existingValue)) { if (pal::strcmp(existingValue, value) != 0) { trace::warning(_X("The property [%s] has a different value [%s] from that in the previously loaded runtime [%s]"), key, value, existingValue); hasDifferentProperties = true; } } else { trace::warning(_X("The property [%s] is not present in the previously loaded runtime."), key); hasDifferentProperties = true; } } if (len > 0 && !hasDifferentProperties) trace::info(_X("All specified properties match those in the previously loaded runtime")); return !hasDifferentProperties; } } // Initializes hostpolicy. Calculates everything required to start the runtime and creates a context to track // that information // // Parameters: // init_request // struct containing information about the initialization request. If hostpolicy is not yet initialized, // this is expected to be nullptr. If hostpolicy is already initialized, this should not be nullptr and // this function will use the struct to check for compatibility with the way in which hostpolicy was // previously initialized. // options // initialization options // context_contract // [out] if initialization is successful, populated with a contract for performing operations on hostpolicy // // Return value: // Success - Initialization was succesful // Success_HostAlreadyInitialized - Request is compatible with already initialized hostpolicy // Success_DifferentRuntimeProperties - Request has runtime properties that differ from already initialized hostpolicy // // This function does not load the runtime // // If a previous request to initialize hostpolicy was made, but the runtime was not yet loaded, this function will // block until the runtime is loaded. // // This function assumes corehost_load has already been called. It uses the init information set through that // call - not the struct passed into this function - to create a context. // // Both Success_HostAlreadyInitialized and Success_DifferentRuntimeProperties codes are considered successful // initializations. In the case of Success_DifferentRuntimeProperties, it is left to the consumer to verify that // the difference in properties is acceptable. 
// SHARED_API int HOSTPOLICY_CALLTYPE corehost_initialize(const corehost_initialize_request_t *init_request, uint32_t options, /*out*/ corehost_context_contract *context_contract) { if (context_contract == nullptr) return StatusCode::InvalidArgFailure; bool version_set = (options & initialization_options_t::context_contract_version_set) != 0; bool wait_for_initialized = (options & initialization_options_t::wait_for_initialized) != 0; bool get_contract = (options & initialization_options_t::get_contract) != 0; if (wait_for_initialized && get_contract) { trace::error(_X("Specifying both initialization options for wait_for_initialized and get_contract is not allowed")); return StatusCode::InvalidArgFailure; } if (get_contract) { if (init_request != nullptr) { trace::error(_X("Initialization request is expected to be null when getting the already initialized contract")); return StatusCode::InvalidArgFailure; } } else { std::unique_lock<std::mutex> lock { g_context_lock }; bool already_initializing = g_context_initializing.load(); bool already_initialized = g_context.get() != nullptr; if (wait_for_initialized) { trace::verbose(_X("Initialization option to wait for initialize request is set")); if (init_request == nullptr) { trace::error(_X("Initialization request is expected to be non-null when waiting for initialize request option is set")); return StatusCode::InvalidArgFailure; } // If we are not already initializing or done initializing, wait until another context initialization has started if (!already_initialized && !already_initializing) { trace::info(_X("Waiting for another request to initialize hostpolicy")); g_context_initializing_cv.wait(lock, [&] { return g_context_initializing.load(); }); } } else { if (init_request != nullptr && !already_initialized && !already_initializing) { trace::error(_X("Initialization request is expected to be null for the first initialization request")); return StatusCode::InvalidArgFailure; } if (init_request == nullptr && (already_initializing || already_initialized)) { trace::error(_X("Initialization request is expected to be non-null for requests other than the first one")); return StatusCode::InvalidArgFailure; } } } // Trace entry point information using previously set init information. // This function does not modify any global state. 
trace_corehost_libhost_init(g_init, _X("corehost_initialize")); int rc; if (wait_for_initialized) { // Wait for context initialization to complete std::unique_lock<std::mutex> lock{ g_context_lock }; g_context_initializing_cv.wait(lock, [] { return !g_context_initializing.load(); }); const hostpolicy_context_t *existing_context = g_context.get(); if (existing_context == nullptr || existing_context->coreclr == nullptr) { trace::info(_X("Option to wait for initialize request was set, but that request did not result in initialization")); return StatusCode::HostInvalidState; } rc = StatusCode::Success_HostAlreadyInitialized; } else if (get_contract) { const std::shared_ptr<hostpolicy_context_t> context = get_hostpolicy_context(/*require_runtime*/ true); if (context == nullptr) { trace::error(_X("Option to get the contract for the initialized hostpolicy was set, but hostpolicy has not been initialized")); return StatusCode::HostInvalidState; } rc = StatusCode::Success; } else { rc = create_hostpolicy_context(g_init, 0 /*argc*/, nullptr /*argv*/, g_init.host_mode != host_mode_t::libhost); if (rc != StatusCode::Success && rc != StatusCode::Success_HostAlreadyInitialized) return rc; } if (rc == StatusCode::Success_HostAlreadyInitialized) { assert(init_request != nullptr && init_request->version >= offsetof(corehost_initialize_request_t, config_values) + sizeof(init_request->config_values) && init_request->config_keys.len == init_request->config_values.len); const std::shared_ptr<hostpolicy_context_t> context = get_hostpolicy_context(/*require_runtime*/ true); if (context == nullptr) return StatusCode::HostInvalidState; // Compare the current context with this request (properties) if (!matches_existing_properties(context->coreclr_properties, init_request)) rc = StatusCode::Success_DifferentRuntimeProperties; } // If version wasn't set, then it would have the original size of corehost_context_contract, which is 7 * sizeof(size_t). size_t version_lo = version_set ? context_contract->version : 7 * sizeof(size_t); context_contract->version = sizeof(corehost_context_contract); context_contract->get_property_value = get_property; context_contract->set_property_value = set_property; context_contract->get_properties = get_properties; context_contract->load_runtime = create_coreclr; context_contract->run_app = run_app; context_contract->get_runtime_delegate = get_delegate; // An old hostfxr may not have provided enough space for these fields. // The version_lo (sizeof) the old hostfxr saw at build time will be // smaller and we should not attempt to write the fields in that case. 
if (version_lo >= offsetof(corehost_context_contract, last_known_delegate_type) + sizeof(context_contract->last_known_delegate_type)) { context_contract->last_known_delegate_type = (size_t)coreclr_delegate_type::__last - 1; } return rc; } SHARED_API int HOSTPOLICY_CALLTYPE corehost_unload() { { std::lock_guard<std::mutex> lock{ g_context_lock }; if (g_context != nullptr && g_context->coreclr != nullptr) return StatusCode::Success; // Allow re-initializing if runtime has not been loaded g_context.reset(); g_context_initializing.store(false); } g_context_initializing_cv.notify_all(); std::lock_guard<std::mutex> init_lock{ g_init_lock }; g_init_done = false; return StatusCode::Success; } SHARED_API int HOSTPOLICY_CALLTYPE corehost_resolve_component_dependencies( const pal::char_t *component_main_assembly_path, corehost_resolve_component_dependencies_result_fn result) { if (trace::is_enabled()) { trace_hostpolicy_entrypoint_invocation(_X("corehost_resolve_component_dependencies")); trace::info(_X(" Component main assembly path: %s"), component_main_assembly_path); trace::info(_X("}")); for (const auto& probe : g_init.probe_paths) { trace::info(_X("Additional probe dir: %s"), probe.c_str()); } } // IMPORTANT: g_init is static/global and thus potentially accessed from multiple threads // We must only use it as read-only here (unlike the run scenarios which own it). // For example the frameworks in g_init.fx_definitions can't be used "as-is" by the resolver // right now as it would try to re-parse the .deps.json and thus modify the objects. // The assumption is that component dependency resolution will only be called // when the coreclr is hosted through this hostpolicy and thus it will // have already called corehost_main_init. if (!g_init.host_info.is_valid(g_init.host_mode)) { trace::error(_X("Hostpolicy must be initialized and corehost_main must have been called before calling corehost_resolve_component_dependencies.")); return StatusCode::CoreHostLibLoadFailure; } // If the current host mode is libhost, use apphost instead. host_mode_t host_mode = g_init.host_mode == host_mode_t::libhost ? host_mode_t::apphost : g_init.host_mode; // Initialize arguments (basically the structure describing the input app/component to resolve) arguments_t args; if (!init_arguments( component_main_assembly_path, g_init.host_info, g_init.tfm, host_mode, /* additional_deps_serialized */ pal::string_t(), // Additional deps - don't use those from the app, they're already in the app /* deps_file */ pal::string_t(), // Avoid using any other deps file than the one next to the component g_init.probe_paths, /* init_from_file_system */ true, args)) { return StatusCode::LibHostInvalidArgs; } args.trace(); // Initialize the "app" framework definition. auto app = new fx_definition_t(); // For now intentionally don't process .runtimeconfig.json since we don't perform framework resolution. // Call parse_runtime_config since it initializes the defaults for various settings // but we don't have any .runtimeconfig.json for the component, so pass in empty paths. // Empty paths is a valid case and the method will simply skip parsing anything. app->parse_runtime_config(pal::string_t(), pal::string_t(), runtime_config_t::settings_t()); if (!app->get_runtime_config().is_valid()) { // This should really never happen, but fail gracefully if it does anyway. 
assert(false); delete app; app = nullptr; trace::error(_X("Failed to initialize empty runtime config for the component.")); return StatusCode::InvalidConfigFile; } // For components we don't want to resolve anything from the frameworks, since those will be supplied by the app. // So only use the component as the "app" framework. fx_definition_vector_t component_fx_definitions; component_fx_definitions.push_back(std::unique_ptr<fx_definition_t>(app)); // TODO Review: Since we're only passing the one component framework, the resolver will not consider // frameworks from the app for probing paths. So potential references to paths inside frameworks will not resolve. // The RID graph still has to come from the actuall root framework, so take that from the g_init.fx_definitions // which are the frameworks for the app. deps_resolver_t resolver( args, component_fx_definitions, &get_root_framework(g_init.fx_definitions).get_deps().get_rid_fallback_graph(), true); pal::string_t resolver_errors; if (!resolver.valid(&resolver_errors)) { trace::error(_X("Error initializing the dependency resolver: %s"), resolver_errors.c_str()); return StatusCode::ResolverInitFailure; } // Don't write breadcrumbs since we're not executing the app, just resolving dependencies // doesn't guarantee that they will actually execute. probe_paths_t probe_paths; if (!resolver.resolve_probe_paths(&probe_paths, nullptr, /* ignore_missing_assemblies */ true)) { return StatusCode::ResolverResolveFailure; } if (trace::is_enabled()) { trace::info(_X("corehost_resolve_component_dependencies results: {")); trace::info(_X(" assembly_paths: '%s'"), probe_paths.tpa.data()); trace::info(_X(" native_search_paths: '%s'"), probe_paths.native.data()); trace::info(_X(" resource_search_paths: '%s'"), probe_paths.resources.data()); trace::info(_X("}")); } result( probe_paths.tpa.data(), probe_paths.native.data(), probe_paths.resources.data()); return 0; } // // Sets a callback which is to be used to write errors to. // // Parameters: // error_writer // A callback function which will be invoked every time an error is to be reported. // Or nullptr to unregister previously registered callback and return to the default behavior. // Return value: // The previously registered callback (which is now unregistered), or nullptr if no previous callback // was registered // // The error writer is registered per-thread, so the registration is thread-local. On each thread // only one callback can be registered. Subsequent registrations overwrite the previous ones. // // By default no callback is registered in which case the errors are written to stderr. // // Each call to the error writer is sort of like writing a single line (the EOL character is omitted). // Multiple calls to the error writer may occure for one failure. // SHARED_API corehost_error_writer_fn HOSTPOLICY_CALLTYPE corehost_set_error_writer(corehost_error_writer_fn error_writer) { return trace::set_error_writer(error_writer); }
-1
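The hostpolicy.cpp listing above serializes context creation with a mutex, an atomic g_context_initializing flag, and a condition variable, so only the first caller builds the context while later callers block until it is published. A minimal, self-contained C++ sketch of that initialize-once pattern follows; the names (context_t, g_lock, g_cv, get_or_create_context) are illustrative only and are not part of the hostpolicy API.

#include <atomic>
#include <condition_variable>
#include <memory>
#include <mutex>

struct context_t { /* payload omitted in this sketch */ };

static std::mutex g_lock;
static std::condition_variable g_cv;
static std::atomic<bool> g_initializing(false);
static std::shared_ptr<context_t> g_ctx;

// Returns the existing context or creates it exactly once.
// Callers that arrive while another thread is initializing block on the
// condition variable, loosely mirroring create_hostpolicy_context above.
std::shared_ptr<context_t> get_or_create_context()
{
    {
        std::unique_lock<std::mutex> lock(g_lock);
        g_cv.wait(lock, [] { return !g_initializing.load(); });
        if (g_ctx != nullptr)
            return g_ctx;            // already initialized by another caller
        g_initializing.store(true);  // claim the initialization slot
    }

    // Expensive construction happens outside the lock.
    auto local = std::make_shared<context_t>();

    {
        std::lock_guard<std::mutex> lock(g_lock);
        g_ctx = local;
        g_initializing.store(false);
    }
    g_cv.notify_all(); // wake any waiters
    return local;
}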
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/mono/mono/utils/refcount.h
/** * \file */ #ifndef __MONO_UTILS_REFCOUNT_H__ #define __MONO_UTILS_REFCOUNT_H__ #include <glib.h> #include <config.h> #include "atomic.h" /* * Mechanism for ref-counting which tries to be as user-friendly as possible. Instead of being a wrapper around * user-provided data, it is embedded into the user data. * * This introduces some constraints on the MonoRefCount field: * - it needs to be called "ref" * - it cannot be a pointer */ typedef struct { guint32 ref; void (*destructor) (gpointer data); } MonoRefCount; #define mono_refcount_init(v,destructor) do { mono_refcount_initialize (&(v)->ref, (destructor)); } while (0) #define mono_refcount_inc(v) (mono_refcount_increment (&(v)->ref),(v)) #define mono_refcount_tryinc(v) (mono_refcount_tryincrement (&(v)->ref)) #define mono_refcount_dec(v) (mono_refcount_decrement (&(v)->ref)) static inline void mono_refcount_initialize (MonoRefCount *refcount, void (*destructor) (gpointer data)) { refcount->ref = 1; refcount->destructor = destructor; } static inline gboolean mono_refcount_tryincrement (MonoRefCount *refcount) { guint32 oldref, newref; g_assert (refcount); do { oldref = refcount->ref; if (oldref == 0) return FALSE; newref = oldref + 1; } while (mono_atomic_cas_i32 ((gint32*) &refcount->ref, (gint32)newref, (gint32)oldref) != (gint32)oldref); return TRUE; } static inline void mono_refcount_increment (MonoRefCount *refcount) { if (!mono_refcount_tryincrement (refcount)) g_error ("%s: cannot increment a ref with value 0", __func__); } static inline guint32 mono_refcount_decrement (MonoRefCount *refcount) { guint32 oldref, newref; g_assert (refcount); do { oldref = refcount->ref; if (oldref == 0) g_error ("%s: cannot decrement a ref with value 0", __func__); newref = oldref - 1; } while (mono_atomic_cas_i32 ((gint32*) &refcount->ref, (gint32)newref, (gint32)oldref) != (gint32)oldref); if (newref == 0 && refcount->destructor) refcount->destructor ((gpointer) refcount); return newref; } #endif /* __MONO_UTILS_REFCOUNT_H__ */
/** * \file */ #ifndef __MONO_UTILS_REFCOUNT_H__ #define __MONO_UTILS_REFCOUNT_H__ #include <glib.h> #include <config.h> #include "atomic.h" /* * Mechanism for ref-counting which tries to be as user-friendly as possible. Instead of being a wrapper around * user-provided data, it is embedded into the user data. * * This introduces some constraints on the MonoRefCount field: * - it needs to be called "ref" * - it cannot be a pointer */ typedef struct { guint32 ref; void (*destructor) (gpointer data); } MonoRefCount; #define mono_refcount_init(v,destructor) do { mono_refcount_initialize (&(v)->ref, (destructor)); } while (0) #define mono_refcount_inc(v) (mono_refcount_increment (&(v)->ref),(v)) #define mono_refcount_tryinc(v) (mono_refcount_tryincrement (&(v)->ref)) #define mono_refcount_dec(v) (mono_refcount_decrement (&(v)->ref)) static inline void mono_refcount_initialize (MonoRefCount *refcount, void (*destructor) (gpointer data)) { refcount->ref = 1; refcount->destructor = destructor; } static inline gboolean mono_refcount_tryincrement (MonoRefCount *refcount) { guint32 oldref, newref; g_assert (refcount); do { oldref = refcount->ref; if (oldref == 0) return FALSE; newref = oldref + 1; } while (mono_atomic_cas_i32 ((gint32*) &refcount->ref, (gint32)newref, (gint32)oldref) != (gint32)oldref); return TRUE; } static inline void mono_refcount_increment (MonoRefCount *refcount) { if (!mono_refcount_tryincrement (refcount)) g_error ("%s: cannot increment a ref with value 0", __func__); } static inline guint32 mono_refcount_decrement (MonoRefCount *refcount) { guint32 oldref, newref; g_assert (refcount); do { oldref = refcount->ref; if (oldref == 0) g_error ("%s: cannot decrement a ref with value 0", __func__); newref = oldref - 1; } while (mono_atomic_cas_i32 ((gint32*) &refcount->ref, (gint32)newref, (gint32)oldref) != (gint32)oldref); if (newref == 0 && refcount->destructor) refcount->destructor ((gpointer) refcount); return newref; } #endif /* __MONO_UTILS_REFCOUNT_H__ */
-1
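The refcount.h listing above embeds the counter in the user's own struct rather than wrapping the user data, which is why the field must be named ref and must not be a pointer. Below is a minimal usage sketch under those constraints; connection_t, connection_new, connection_destroy and the include path are made up for illustration and are not taken from the Mono sources.

#include <glib.h>
#include "mono/utils/refcount.h"   /* include path assumed */

typedef struct {
    MonoRefCount ref;   /* must be named "ref" and must not be a pointer */
    int fd;
} connection_t;

static void
connection_destroy (gpointer data)
{
    connection_t *conn = (connection_t *)data;
    /* release resources, then free the memory that holds the refcount */
    g_free (conn);
}

static connection_t *
connection_new (int fd)
{
    connection_t *conn = g_new0 (connection_t, 1);
    conn->fd = fd;
    mono_refcount_init (conn, connection_destroy); /* count starts at 1 */
    return conn;
}

/* Callers take and drop references; per the header above, the destructor
 * runs when the count reaches zero inside mono_refcount_dec. */
static void
use_connection (connection_t *conn)
{
    mono_refcount_inc (conn);
    /* ... use conn->fd ... */
    mono_refcount_dec (conn);
}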
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/mono/mono/utils/mono-errno.h
/** * \file * Access the native error code * * Author: * Alexander Kyte ([email protected]) * * (C) 2018 Microsoft, Inc. * */ #ifndef __MONO_ERRNO_H__ #define __MONO_ERRNO_H__ #include <errno.h> // Enough indirection to do something else here, or log inline static void mono_set_errno (int errno_val) { errno = errno_val; } #endif
/** * \file * Access the native error code * * Author: * Alexander Kyte ([email protected]) * * (C) 2018 Microsoft, Inc. * */ #ifndef __MONO_ERRNO_H__ #define __MONO_ERRNO_H__ #include <errno.h> // Enough indirection to do something else here, or log inline static void mono_set_errno (int errno_val) { errno = errno_val; } #endif
-1
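mono-errno.h above is only a thin indirection over assigning errno directly. A short, hypothetical call pattern (the wrapper function and include path are illustrative):

#include <errno.h>
#include "mono/utils/mono-errno.h"   /* include path assumed */

/* Hypothetical helper: report an invalid argument the libc way. */
static int
sample_open_checked (const char *path)
{
    if (path == NULL) {
        mono_set_errno (EINVAL); /* expands to errno = EINVAL */
        return -1;
    }
    /* ... open and return a descriptor ... */
    return 0;
}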
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/coreclr/ildasm/dasm.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "ildasmpch.h" #include <crtdbg.h> #include <utilcode.h> #include "specstrings.h" #include "debugmacros.h" #include "corpriv.h" #include "ceeload.h" #include "dynamicarray.h" #include <metamodelpub.h> #include "formattype.h" #define DECLARE_DATA #include "dasmenum.hpp" #include "dis.h" #include "resource.h" #include "dasm_sz.h" //#define MAX_FILENAME_LENGTH 2048 //moved to dis.h #include <corsym.h> #include <clrversion.h> // Disable the "initialization of static local vars is no thread safe" error #ifdef _MSC_VER #pragma warning(disable : 4640) #endif #ifdef TARGET_UNIX #include "resourcestring.h" #define NATIVE_STRING_RESOURCE_NAME dasm_rc DECLARE_NATIVE_STRING_RESOURCE_TABLE(NATIVE_STRING_RESOURCE_NAME); #endif #include "mdfileformat.h" struct MIDescriptor { mdToken tkClass; // defining class token mdToken tkDecl; // implemented method token mdToken tkBody; // implementing method token mdToken tkBodyParent; // parent of the implementing method }; ISymUnmanagedReader* g_pSymReader = NULL; IMDInternalImport* g_pImport = NULL; IMetaDataImport2* g_pPubImport; extern IMetaDataAssemblyImport* g_pAssemblyImport; PELoader * g_pPELoader; void * g_pMetaData; unsigned g_cbMetaData; IMAGE_COR20_HEADER * g_CORHeader; DynamicArray<__int32> *g_pPtrTags = NULL; //to keep track of all "ldptr" DynamicArray<DWORD> *g_pPtrSize= NULL; //to keep track of all "ldptr" int g_iPtrCount = 0; mdToken * g_cl_list = NULL; mdToken * g_cl_enclosing = NULL; BYTE* g_enum_td_type = NULL; // enum (TD) underlying types BYTE* g_enum_tr_type = NULL; // enum (TR) underlying types IMDInternalImport** g_asmref_import = NULL; // IMDInternalImports for external assemblies DynamicArray<MIDescriptor> *g_pmi_list = NULL; DWORD g_NumMI; DWORD g_NumClasses; DWORD g_NumTypeRefs; DWORD g_NumAsmRefs; DWORD g_NumModules; BOOL g_fDumpIL = TRUE; BOOL g_fDumpHeader = FALSE; BOOL g_fDumpAsmCode = TRUE; extern BOOL g_fDumpTokens; // declared in formatType.cpp BOOL g_fDumpStats = FALSE; BOOL g_fTDC = TRUE; BOOL g_fShowCA = TRUE; BOOL g_fCAVerbal = FALSE; BOOL g_fShowRefs = FALSE; BOOL g_fDumpToPerfWriter = FALSE; HANDLE g_PerfDataFilePtr = NULL; BOOL g_fDumpClassList = FALSE; BOOL g_fDumpTypeList = FALSE; BOOL g_fDumpSummary = FALSE; BOOL g_fDecompile = FALSE; // still in progress BOOL g_fShowBytes = FALSE; BOOL g_fShowSource = FALSE; BOOL g_fPrettyPrint = FALSE; BOOL g_fInsertSourceLines = FALSE; BOOL g_fThisIsInstanceMethod; BOOL g_fTryInCode = TRUE; BOOL g_fLimitedVisibility = FALSE; BOOL g_fHidePub = TRUE; BOOL g_fHidePriv = TRUE; BOOL g_fHideFam = TRUE; BOOL g_fHideAsm = TRUE; BOOL g_fHideFAA = TRUE; BOOL g_fHideFOA = TRUE; BOOL g_fHidePrivScope = TRUE; BOOL g_fProject = FALSE; // if .winmd file, transform to .NET view extern BOOL g_fQuoteAllNames; // declared in formatType.cpp, init to FALSE BOOL g_fForwardDecl=FALSE; char g_szAsmCodeIndent[MAX_MEMBER_LENGTH]; char g_szNamespace[MAX_MEMBER_LENGTH]; DWORD g_Mode = MODE_DUMP_ALL; char g_pszClassToDump[MAX_CLASSNAME_LENGTH]; char g_pszMethodToDump[MAX_MEMBER_LENGTH]; char g_pszSigToDump[MAX_SIGNATURE_LENGTH]; BOOL g_fCustomInstructionEncodingSystem = FALSE; COR_FIELD_OFFSET *g_rFieldOffset = NULL; ULONG g_cFieldsMax, g_cFieldOffsets; char* g_pszExeFile; char g_szInputFile[MAX_FILENAME_LENGTH]; // in UTF-8 WCHAR g_wszFullInputFile[MAX_PATH + 1]; // in UTF-16 char g_szOutputFile[MAX_FILENAME_LENGTH]; // in UTF-8 char* g_pszObjFileName; FILE* g_pFile = 
NULL; mdToken g_tkClassToDump = 0; mdToken g_tkMethodToDump = 0; unsigned g_uConsoleCP = CP_ACP; unsigned g_uCodePage = g_uConsoleCP; char* g_rchCA = NULL; // dyn.allocated array of CA dumped/not flags unsigned g_uNCA = 0; // num. of CAs struct ResourceNode; extern DynamicArray<LocalComTypeDescr*> *g_pLocalComType; extern ULONG g_LocalComTypeNum; // MetaInfo integration: #include "../tools/metainfo/mdinfo.h" BOOL g_fDumpMetaInfo = FALSE; ULONG g_ulMetaInfoFilter = MDInfo::dumpDefault; // Validator module type. DWORD g_ValModuleType = ValidatorModuleTypeInvalid; IMetaDataDispenserEx *g_pDisp = NULL; void DisplayFile(_In_ __nullterminated WCHAR* szFile, BOOL isFile, ULONG DumpFilter, _In_opt_z_ WCHAR* szObjFile, strPassBackFn pDisplayString); extern mdMethodDef g_tkEntryPoint; // integration with MetaInfo DWORD DumpResourceToFile(_In_ __nullterminated WCHAR* wzFileName); // see DRES.CPP struct VTableRef { mdMethodDef tkTok; WORD wEntry; WORD wSlot; }; DynamicArray<VTableRef> *g_prVTableRef = NULL; ULONG g_nVTableRef = 0; struct EATableRef { mdMethodDef tkTok; char* pszName; }; DynamicArray<EATableRef> *g_prEATableRef=NULL; ULONG g_nEATableRef = 0; ULONG g_nEATableBase = 0; extern HINSTANCE g_hResources; void DumpCustomAttributeProps(mdToken tkCA, mdToken tkType, mdToken tkOwner, BYTE*pBlob, ULONG ulLen, void *GUICookie, bool bWithOwner); WCHAR* RstrW(unsigned id) { static WCHAR buffer[1024]; DWORD cchBuff = (DWORD)ARRAY_SIZE(buffer); WCHAR* buff = (WCHAR*)buffer; memset(buffer,0,sizeof(buffer)); switch(id) { case IDS_E_DASMOK: case IDS_E_PARTDASM: case IDS_E_PARAMSEQNO: case IDS_E_MEMBRENUM: case IDS_E_ODDMEMBER: case IDS_E_ENUMINIT: case IDS_E_NODATA: case IDS_E_VTFUTABLE: case IDS_E_BOGUSRVA: case IDS_E_EATJTABLE: case IDS_E_EATJSIZE: case IDS_E_RESFLAGS: case IDS_E_MIHENTRY: case IDS_E_CODEMGRTBL: case IDS_E_COMIMAGE: case IDS_E_MDDETAILS: case IDS_E_MISTART: case IDS_E_MIEND: case IDS_E_ONLYITEMS: case IDS_E_DECOMPRESS: case IDS_E_COMPRESSED: case IDS_E_INSTRDECOD: case IDS_E_INSTRTYPE: case IDS_E_SECTHEADER: case IDS_E_MDAIMPORT: case IDS_E_MDAFROMMDI: case IDS_E_MDIIMPORT: case IDS_E_NOMANIFEST: case IDS_W_CREATEDW32RES: case IDS_E_CORRUPTW32RES: case IDS_E_CANTACCESSW32RES: case IDS_E_CANTOPENW32RES: case IDS_ERRORREOPENINGFILE: wcscpy_s(buffer,ARRAY_SIZE(buffer),W("// ")); buff +=3; cchBuff -= 3; break; case IDS_E_AUTOCA: case IDS_E_METHBEG: case IDS_E_DASMNATIVE: case IDS_E_METHODRT: case IDS_E_CODESIZE: case IDS_W_CREATEDMRES: case IDS_E_READINGMRES: wcscpy_s(buffer,ARRAY_SIZE(buffer),W("%s// ")); buff +=5; cchBuff -= 5; break; case IDS_E_NORVA: wcscpy_s(buffer,ARRAY_SIZE(buffer),W("/* ")); buff += 3; cchBuff -= 3; break; default: break; } #ifdef TARGET_UNIX LoadNativeStringResource(NATIVE_STRING_RESOURCE_TABLE(NATIVE_STRING_RESOURCE_NAME),id, buff, cchBuff, NULL); #else _ASSERTE(g_hResources != NULL); WszLoadString(g_hResources,id,buff,cchBuff); #endif if(id == IDS_E_NORVA) wcscat_s(buff,cchBuff,W(" */")); return buffer; } char* RstrA(unsigned n, unsigned codepage) { static char buff[2048]; WCHAR* wz = RstrW(n); // Unicode -> UTF-8 memset(buff,0,sizeof(buff)); if(!WszWideCharToMultiByte(codepage,0,(LPCWSTR)wz,-1,buff,sizeof(buff),NULL,NULL)) buff[0] = 0; return buff; } char* RstrUTF(unsigned n) { return RstrA(n,CP_UTF8); } char* RstrANSI(unsigned n) { return RstrA(n,g_uConsoleCP); } #if 0 void PrintEncodingSystem() { long i; printf("Custom opcode encoding system employed\n"); printf("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n"); for (i = 0; i < 256; i++) { long value = 
g_pInstructionDecodingTable->m_SingleByteOpcodes[i]; printf("0x%02x --> ", i); printf("%s\n", OpcodeInfo[value].pszName); } } #endif // buffers for formatType functions extern CQuickBytes * g_szBuf_KEYWORD; extern CQuickBytes * g_szBuf_COMMENT; extern CQuickBytes * g_szBuf_ERRORMSG; extern CQuickBytes * g_szBuf_ANCHORPT; extern CQuickBytes * g_szBuf_JUMPPT; extern CQuickBytes * g_szBuf_UnquotedProperName; extern CQuickBytes * g_szBuf_ProperName; BOOL Init() { g_szBuf_KEYWORD = new CQuickBytes(); g_szBuf_COMMENT = new CQuickBytes(); g_szBuf_ERRORMSG = new CQuickBytes(); g_szBuf_ANCHORPT = new CQuickBytes(); g_szBuf_JUMPPT = new CQuickBytes(); g_szBuf_UnquotedProperName = new CQuickBytes(); g_szBuf_ProperName = new CQuickBytes(); return TRUE; } // Init extern LPCSTR *rAsmRefName; // decl. in formatType.cpp -- for AsmRef aliases extern ULONG ulNumAsmRefs; // decl. in formatType.cpp -- for AsmRef aliases void Cleanup() { if (g_pAssemblyImport != NULL) { g_pAssemblyImport->Release(); g_pAssemblyImport = NULL; } if (g_pPubImport != NULL) { g_pPubImport->Release(); g_pPubImport = NULL; } if (g_pImport != NULL) { g_pImport->Release(); g_pImport = NULL; TokenSigDelete(); } if (g_pDisp != NULL) { g_pDisp->Release(); g_pDisp = NULL; } if (g_pSymReader != NULL) { g_pSymReader->Release(); g_pSymReader = NULL; } if (g_pPELoader != NULL) { g_pPELoader->close(); SDELETE(g_pPELoader); } g_iPtrCount = 0; g_NumClasses = 0; g_NumTypeRefs = 0; g_NumModules = 0; g_tkEntryPoint = 0; g_szAsmCodeIndent[0] = 0; g_szNamespace[0]=0; g_pszClassToDump[0]=0; g_pszMethodToDump[0]=0; g_pszSigToDump[0] = 0; g_NumDups = 0; g_NumRefs = 0; g_NumMI = 0; g_LocalComTypeNum = 0; g_nEATableRef = 0; g_fCustomInstructionEncodingSystem = FALSE; if (rAsmRefName != NULL) { for (int i = 0; (unsigned)i < ulNumAsmRefs; i++) { if (rAsmRefName[i] != NULL) VDELETE(rAsmRefName[i]); } VDELETE(rAsmRefName); ulNumAsmRefs = 0; } if (g_rchCA != NULL) VDELETE(g_rchCA); if (g_cl_list != NULL) VDELETE(g_cl_list); if (g_cl_enclosing != NULL) VDELETE(g_cl_enclosing); if (g_pmi_list != NULL) SDELETE(g_pmi_list); if (g_dups != NULL) SDELETE(g_dups); if (g_enum_td_type != NULL) VDELETE(g_enum_td_type); if (g_enum_tr_type != NULL) VDELETE(g_enum_tr_type); if (g_asmref_import != NULL) { for (DWORD i = 0; i < g_NumAsmRefs; i++) { if (g_asmref_import[i] != NULL) g_asmref_import[i]->Release(); } VDELETE(g_asmref_import); g_NumAsmRefs = 0; } } // Cleanup void Uninit() { if (g_pPtrTags != NULL) { SDELETE(g_pPtrTags); } if (g_pPtrSize != NULL) { SDELETE(g_pPtrSize); } if (g_pmi_list != NULL) { SDELETE(g_pmi_list); } if (g_dups != NULL) SDELETE(g_dups); if (g_refs != NULL) SDELETE(g_refs); if (g_pLocalComType != NULL) { SDELETE(g_pLocalComType); } if (g_prVTableRef != NULL) { SDELETE(g_prVTableRef); } if (g_prEATableRef != NULL) { SDELETE(g_prEATableRef); } if (g_szBuf_KEYWORD != NULL) { SDELETE(g_szBuf_KEYWORD); } if (g_szBuf_COMMENT != NULL) { SDELETE(g_szBuf_COMMENT); } if (g_szBuf_ERRORMSG != NULL) { SDELETE(g_szBuf_ERRORMSG); } if (g_szBuf_ANCHORPT != NULL) { SDELETE(g_szBuf_ANCHORPT); } if (g_szBuf_JUMPPT != NULL) { SDELETE(g_szBuf_JUMPPT); } if (g_szBuf_UnquotedProperName != NULL) { SDELETE(g_szBuf_UnquotedProperName); } if (g_szBuf_ProperName != NULL) { SDELETE(g_szBuf_ProperName); } } // Uninit HRESULT IsClassRefInScope(mdTypeRef classref) { HRESULT hr = S_OK; const char *pszNameSpace; const char *pszClassName; mdTypeDef classdef; mdToken tkRes; IfFailRet(g_pImport->GetNameOfTypeRef(classref, &pszNameSpace, &pszClassName)); 
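    // A TypeRef counts as "in scope" when this module also defines a TypeDef with the same
    // namespace and name; for nested types the TypeRef's resolution scope (itself a TypeRef)
    // is passed to FindTypeDef below as the enclosing type, otherwise mdTokenNil is used.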
MAKE_NAME_IF_NONE(pszClassName,classref); IfFailRet(g_pImport->GetResolutionScopeOfTypeRef(classref, &tkRes)); hr = g_pImport->FindTypeDef(pszNameSpace, pszClassName, (TypeFromToken(tkRes) == mdtTypeRef) ? tkRes : mdTokenNil, &classdef); return hr; } #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable:21000) // Suppress PREFast warning about overly large function #endif BOOL EnumClasses() { HRESULT hr; HENUMInternal hEnum; ULONG i = 0,j; //char szString[1024]; HENUMInternal hBody; HENUMInternal hDecl; if(g_cl_list) VDELETE(g_cl_list); if(g_cl_enclosing) VDELETE(g_cl_enclosing); if (g_pmi_list) SDELETE(g_pmi_list); if (g_dups) SDELETE(g_dups); if (g_enum_td_type) VDELETE(g_enum_td_type); if (g_enum_tr_type) VDELETE(g_enum_tr_type); if (g_asmref_import) { for (DWORD nIndex = 0; nIndex < g_NumAsmRefs; nIndex++) { if (g_asmref_import[nIndex] != NULL) g_asmref_import[nIndex]->Release(); } VDELETE(g_asmref_import); g_NumAsmRefs = 0; } //-------------------------------------------------------------- if (FAILED(g_pImport->EnumAllInit(mdtTypeRef,&hEnum))) { printError(g_pFile, "MetaData error: cannot enumerate all TypeRefs"); return FALSE; } g_NumTypeRefs = g_pImport->EnumGetCount(&hEnum); g_pImport->EnumClose(&hEnum); if(g_NumTypeRefs) { g_enum_tr_type = new BYTE[g_NumTypeRefs+1]; if(g_enum_tr_type == NULL) return FALSE; memset(g_enum_tr_type,0xFF,g_NumTypeRefs+1); } //-------------------------------------------------------------- if (FAILED(g_pImport->EnumAllInit(mdtAssemblyRef, &hEnum))) { printError(g_pFile, "MetaData error: cannot enumerate all AssemblyRefs"); return FALSE; } g_NumAsmRefs = g_pImport->EnumGetCount(&hEnum); g_pImport->EnumClose(&hEnum); if(g_NumAsmRefs) { g_asmref_import = new IMDInternalImport*[g_NumAsmRefs+1]; if(g_asmref_import == NULL) return FALSE; memset(g_asmref_import,0,(g_NumAsmRefs+1)*sizeof(IMDInternalImport*)); } //-------------------------------------------------------------- hr = g_pImport->EnumTypeDefInit( &hEnum); if (FAILED(hr)) { printError(g_pFile,RstrUTF(IDS_E_CLSENUM)); return FALSE; } g_NumClasses = g_pImport->EnumGetCount(&hEnum); g_tkClassToDump = 0; g_NumMI = 0; g_NumDups = 0; if(g_NumClasses == 0) return TRUE; g_enum_td_type = new BYTE[g_NumClasses+1]; if(g_enum_td_type == NULL) return FALSE; memset(g_enum_td_type,0xFF,g_NumClasses+1); g_cl_list = new mdToken[g_NumClasses]; if(g_cl_list == NULL) return FALSE; g_cl_enclosing = new mdToken[g_NumClasses]; if(g_cl_enclosing == NULL) { VDELETE(g_cl_list); return FALSE; } g_pmi_list = new DynamicArray<MIDescriptor>; if(g_pmi_list == NULL) { VDELETE(g_cl_enclosing); VDELETE(g_cl_list); return FALSE; } g_dups = new DynamicArray<mdToken>; if(g_dups == NULL) { SDELETE(g_pmi_list); VDELETE(g_cl_enclosing); VDELETE(g_cl_list); return FALSE; } // fill the list of typedef tokens while(g_pImport->EnumNext(&hEnum, &g_cl_list[i])) { mdToken tkEnclosing; if (g_Mode == MODE_DUMP_CLASS || g_Mode == MODE_DUMP_CLASS_METHOD || g_Mode == MODE_DUMP_CLASS_METHOD_SIG) { CQuickBytes out; // we want plain class name without token values BOOL fDumpTokens = g_fDumpTokens; g_fDumpTokens = FALSE; PAL_CPP_TRY { if (strcmp(PrettyPrintClass(&out, g_cl_list[i], g_pImport), g_pszClassToDump) == 0) { g_tkClassToDump = g_cl_list[i]; } } PAL_CPP_CATCH_ALL { } PAL_CPP_ENDTRY; g_fDumpTokens = fDumpTokens; } g_cl_enclosing[i] = mdTypeDefNil; hr = g_pImport->GetNestedClassProps(g_cl_list[i],&tkEnclosing); if (SUCCEEDED(hr) && RidFromToken(tkEnclosing)) // No need to check token validity here, it's done later g_cl_enclosing[i] = 
tkEnclosing; if (SUCCEEDED(g_pImport->EnumMethodImplInit(g_cl_list[i],&hBody,&hDecl))) { if ((j = g_pImport->EnumMethodImplGetCount(&hBody,&hDecl))) { mdToken tkBody,tkDecl,tkBodyParent; for (ULONG k = 0; k < j; k++) { if (g_pImport->EnumMethodImplNext(&hBody,&hDecl,&tkBody,&tkDecl) == S_OK) { if (SUCCEEDED(g_pImport->GetParentToken(tkBody,&tkBodyParent))) { (*g_pmi_list)[g_NumMI].tkClass = g_cl_list[i]; (*g_pmi_list)[g_NumMI].tkBody = tkBody; (*g_pmi_list)[g_NumMI].tkDecl = tkDecl; (*g_pmi_list)[g_NumMI].tkBodyParent = tkBodyParent; g_NumMI++; } } } } g_pImport->EnumMethodImplClose(&hBody,&hDecl); } i++; } g_pImport->EnumClose(&hEnum); // check nesting consistency (circular nesting, invalid enclosers) for(i = 0; i < g_NumClasses; i++) { mdToken tkThis = g_cl_list[i]; mdToken tkEncloser = g_cl_enclosing[i]; mdToken tkPrevLevel = tkThis; while(tkEncloser != mdTypeDefNil) { if(tkThis == tkEncloser) { sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_SELFNSTD),tkThis); printError(g_pFile,szString); g_cl_enclosing[i] = mdTypeDefNil; break; } else { for(j = 0; (j < g_NumClasses)&&(tkEncloser != g_cl_list[j]); j++); if(j == g_NumClasses) { sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_NOENCLOS), tkPrevLevel,tkEncloser); printError(g_pFile,szString); g_cl_enclosing[i] = mdTypeDefNil; break; } else { tkPrevLevel = tkEncloser; tkEncloser = g_cl_enclosing[j]; } } } // end while(tkEncloser != mdTypeDefNil) } // end for(i = 0; i < g_NumClasses; i++) // register all class dups const char *pszClassName; const char *pszNamespace; const char *pszClassName1; const char *pszNamespace1; if (FAILED(g_pImport->GetNameOfTypeDef( g_cl_list[0], &pszClassName, &pszNamespace))) { char sz[2048]; sprintf_s(sz, 2048, RstrUTF(IDS_E_INVALIDRECORD), g_cl_list[0]); printLine(g_pFile, sz); return FALSE; } if((g_cl_enclosing[0]==mdTypeDefNil) &&(0==strcmp(pszClassName,"<Module>")) &&(*pszNamespace == 0)) { (*g_dups)[g_NumDups++] = g_cl_list[0]; } for(i = 1; i < g_NumClasses; i++) { if (FAILED(g_pImport->GetNameOfTypeDef( g_cl_list[i], &pszClassName, &pszNamespace))) { char sz[2048]; sprintf_s(sz, 2048, RstrUTF(IDS_E_INVALIDRECORD), g_cl_list[i]); printLine(g_pFile, sz); return FALSE; } for(j = 0; j < i; j++) { if (FAILED(g_pImport->GetNameOfTypeDef( g_cl_list[j], &pszClassName1, &pszNamespace1))) { char sz[2048]; sprintf_s(sz, 2048, RstrUTF(IDS_E_INVALIDRECORD), g_cl_list[j]); printLine(g_pFile, sz); return FALSE; } if((g_cl_enclosing[i]==g_cl_enclosing[j]) &&(0==strcmp(pszClassName,pszClassName1)) &&(0==strcmp(pszNamespace,pszNamespace1))) { (*g_dups)[g_NumDups++] = g_cl_list[i]; break; } } } // end for(i = 1; i < g_NumClasses; i++) //register all field and method dups for(i = 0; i <= g_NumClasses; i++) { HENUMInternal hEnumMember; mdToken *pMemberList = NULL; DWORD NumMembers,k; // methods if (i != 0) { hr = g_pImport->EnumInit(mdtMethodDef, g_cl_list[i-1], &hEnumMember); } else { hr = g_pImport->EnumGlobalFunctionsInit(&hEnumMember); } if (FAILED(hr)) { printLine(g_pFile,RstrUTF(IDS_E_MEMBRENUM)); return FALSE; } NumMembers = g_pImport->EnumGetCount(&hEnumMember); pMemberList = new mdToken[NumMembers]; for (j = 0; g_pImport->EnumNext(&hEnumMember, &pMemberList[j]); j++); _ASSERTE(j == NumMembers); g_pImport->EnumClose(&hEnumMember); for (j = 1; j < NumMembers; j++) { const char *pszName; ULONG cSig; PCCOR_SIGNATURE pSig; if (FAILED(g_pImport->GetNameOfMethodDef(pMemberList[j], &pszName)) || FAILED(g_pImport->GetSigOfMethodDef(pMemberList[j], &cSig, &pSig))) { char sz[2048]; sprintf_s(sz, 2048, RstrUTF(IDS_E_INVALIDRECORD), 
pMemberList[j]); printLine(g_pFile, sz); return FALSE; } for (k = 0; k < j; k++) { const char *szName1; if (FAILED(g_pImport->GetNameOfMethodDef(pMemberList[k], &szName1))) { char sz[2048]; sprintf_s(sz, 2048, RstrUTF(IDS_E_INVALIDRECORD), pMemberList[k]); printLine(g_pFile, sz); return FALSE; } if (strcmp(pszName, szName1) == 0) { ULONG cSig1; PCCOR_SIGNATURE pSig1; if (FAILED(g_pImport->GetSigOfMethodDef(pMemberList[k], &cSig1, &pSig1))) { char sz[2048]; sprintf_s(sz, 2048, RstrUTF(IDS_E_INVALIDRECORD), pMemberList[k]); printLine(g_pFile, sz); return FALSE; } if((cSig == cSig1)&&(0==memcmp(pSig,pSig1,cSig))) { (*g_dups)[g_NumDups++] = pMemberList[j]; break; } } } } VDELETE(pMemberList); // fields if (i != 0) { hr = g_pImport->EnumInit(mdtFieldDef, g_cl_list[i-1], &hEnumMember); } else { hr = g_pImport->EnumGlobalFieldsInit(&hEnumMember); } if (FAILED(hr)) { printLine(g_pFile,RstrUTF(IDS_E_MEMBRENUM)); return FALSE; } NumMembers = g_pImport->EnumGetCount(&hEnumMember); pMemberList = new mdToken[NumMembers]; for (j = 0; g_pImport->EnumNext(&hEnumMember, &pMemberList[j]); j++); _ASSERTE(j == NumMembers); g_pImport->EnumClose(&hEnumMember); for (j = 1; j < NumMembers; j++) { const char *pszName; ULONG cSig; PCCOR_SIGNATURE pSig; if (FAILED(g_pImport->GetNameOfFieldDef(pMemberList[j], &pszName)) || FAILED(g_pImport->GetSigOfFieldDef(pMemberList[j], &cSig, &pSig))) { char sz[2048]; sprintf_s(sz, 2048, RstrUTF(IDS_E_INVALIDRECORD), pMemberList[j]); printLine(g_pFile, sz); return FALSE; } for (k = 0; k < j; k++) { const char *szName1; if (FAILED(g_pImport->GetNameOfFieldDef(pMemberList[k], &szName1))) { char sz[2048]; sprintf_s(sz, 2048, RstrUTF(IDS_E_INVALIDRECORD), pMemberList[k]); printLine(g_pFile, sz); return FALSE; } if (strcmp(pszName, szName1) == 0) { ULONG cSig1; PCCOR_SIGNATURE pSig1; if (FAILED(g_pImport->GetSigOfFieldDef(pMemberList[k], &cSig1, &pSig1))) { char sz[2048]; sprintf_s(sz, 2048, RstrUTF(IDS_E_INVALIDRECORD), pMemberList[k]); printLine(g_pFile, sz); return FALSE; } if((cSig == cSig1)&&(0==memcmp(pSig,pSig1,cSig))) { (*g_dups)[g_NumDups++] = pMemberList[j]; break; } } } } VDELETE(pMemberList); } // end for(i = 0; i <= g_NumClasses; i++) return TRUE; } #ifdef _PREFAST_ #pragma warning(pop) #endif void DumpMscorlib(void* GUICookie) { // In the CoreCLR with reference assemblies and redirection it is more difficult to determine if // a particular Assembly is the System assembly, like mscorlib.dll is for the Desktop CLR. // In the CoreCLR runtimes, the System assembly can be System.Private.CoreLib.dll, System.Runtime.dll // or netstandard.dll and in the future a different Assembly name could be used. // We now determine the identity of the System assembly by querying if the Assembly defines the // well known type System.Object as that type must be defined by the System assembly // If this type is defined then we will output the ".mscorlib" directive to indicate that this // assembly is the System assembly. 
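    // For illustration, when the check below succeeds the disassembly gains a line like:
    //
    //     .mscorlib
    //
    // printed with the current indentation and surrounded by blank lines.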
// mdTypeDef tkObjectTypeDef = mdTypeDefNil; // Lookup the type System.Object and see it it has a type definition in this assembly if (SUCCEEDED(g_pPubImport->FindTypeDefByName(W("System.Object"), mdTypeDefNil, &tkObjectTypeDef))) { if (tkObjectTypeDef != mdTypeDefNil) { // We do have a type definition for System.Object in this assembly // DWORD dwClassAttrs = 0; mdToken tkExtends = mdTypeDefNil; // Retrieve the type def properties as well, so that we can check a few more things about // the System.Object type // if (SUCCEEDED(g_pPubImport->GetTypeDefProps(tkObjectTypeDef, NULL, NULL, 0, &dwClassAttrs, &tkExtends))) { bool bExtends = g_pPubImport->IsValidToken(tkExtends); bool isClass = ((dwClassAttrs & tdClassSemanticsMask) == tdClass); // We also check the type properties to make sure that we have a class and not a Value type definition // and that this type definition isn't extending another type. // if (isClass & !bExtends) { // We will mark this assembly with the System assembly directive: .mscorlib // printLine(GUICookie, ""); sprintf_s(szString, SZSTRING_SIZE, "%s%s ", g_szAsmCodeIndent, KEYWORD(".mscorlib")); printLine(GUICookie, szString); printLine(GUICookie, ""); } } } } } void DumpTypelist(void* GUICookie) { if(g_NumClasses > 1) { DWORD i; CQuickBytes out; printLine(GUICookie,""); sprintf_s(szString,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,KEYWORD(".typelist")); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"%s%s",g_szAsmCodeIndent,SCOPE()); printLine(GUICookie,szString); strcat_s(g_szAsmCodeIndent,MAX_MEMBER_LENGTH," "); for(i = 0; i < g_NumClasses; i++) { out.Shrink(0); sprintf_s(szString,SZSTRING_SIZE, "%s%s",g_szAsmCodeIndent, PrettyPrintClass(&out, g_cl_list[i], g_pImport)); printLine(GUICookie,szString); } g_szAsmCodeIndent[strlen(g_szAsmCodeIndent)-2] = 0; sprintf_s(szString,SZSTRING_SIZE,"%s%s",g_szAsmCodeIndent,UNSCOPE()); printLine(GUICookie,szString); printLine(GUICookie,""); } } #define ELEMENT_TYPE_TYPEDEF (ELEMENT_TYPE_MAX+1) BOOL EnumTypedefs() { HENUMInternal hEnum; ULONG i,l; mdToken tk; if (g_typedefs) SDELETE(g_typedefs); g_typedefs = new DynamicArray<TypeDefDescr>; g_NumTypedefs = 0; if (FAILED(g_pImport->EnumAllInit(mdtTypeSpec, &hEnum))) { return FALSE; } for (i = 0; g_pImport->EnumNext(&hEnum, &tk); i++) { ULONG cSig; PCCOR_SIGNATURE sig; if (FAILED(g_pImport->GetSigFromToken(tk, &cSig, &sig))) { return FALSE; } if (*sig == ELEMENT_TYPE_TYPEDEF) { TypeDefDescr* pTDD = &((*g_typedefs)[g_NumTypedefs]); pTDD->szName = (char*)sig+1; l = 2+(ULONG)strlen((char*)sig+1); pTDD->tkTypeSpec = GET_UNALIGNED_VAL32(sig + l); pTDD->tkSelf = tk; if (TypeFromToken(pTDD->tkTypeSpec) == mdtTypeSpec) { if (FAILED(g_pImport->GetSigFromToken(pTDD->tkTypeSpec,&(pTDD->cb), &(pTDD->psig)))) { return FALSE; } } else if (TypeFromToken(pTDD->tkTypeSpec) == mdtCustomAttribute) { l += sizeof(mdToken); pTDD->psig = sig + l; pTDD->cb = cSig - l; } else { pTDD->psig = NULL; pTDD->cb = 0; } g_NumTypedefs++; } } g_pImport->EnumClose(&hEnum); return TRUE; } void DumpTypedefs(void* GUICookie) { DWORD i; char* szptr; CQuickBytes out; printLine(GUICookie,""); for(i = 0; i < g_NumTypedefs; i++) { TypeDefDescr* pTDD = &((*g_typedefs)[i]); szptr = &szString[0]; szString[0] = 0; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,ANCHORPT(KEYWORD(".typedef"),pTDD->tkSelf)); if(g_fDumpTokens) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("/*%08X*/ "),pTDD->tkSelf); { ULONG n = g_NumTypedefs; DWORD tk = pTDD->tkTypeSpec; switch (TypeFromToken(tk)) { 
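            // The alias target drives the dump form: a custom-attribute typedef is expanded via
            // DumpCustomAttributeProps, field/method tokens get a "field "/"method " keyword prefix,
            // and plain type tokens fall through to PrettyPrintToken after this switch.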
default: break; case mdtCustomAttribute: printLine(GUICookie,szString); strcat_s(g_szAsmCodeIndent,MAX_MEMBER_LENGTH," "); { mdToken tkType; mdToken tkOwner; BYTE* pBlob=NULL; ULONG uLen=0; tkType = GET_UNALIGNED_VAL32(pTDD->psig); tkOwner = GET_UNALIGNED_VAL32(pTDD->psig + sizeof(mdToken)); if(pTDD->cb > 2*sizeof(mdToken)) { pBlob = (BYTE*)pTDD->psig + 2*sizeof(mdToken); uLen = pTDD->cb - 2*sizeof(mdToken); } DumpCustomAttributeProps(0,tkType,tkOwner,pBlob,uLen,GUICookie, (RidFromToken(tkOwner)!=0)); } sprintf_s(szString,SZSTRING_SIZE,"%s %s %s", g_szAsmCodeIndent,KEYWORD("as"), ProperName((*g_typedefs)[i].szName)); printLine(GUICookie,szString); g_szAsmCodeIndent[strlen(g_szAsmCodeIndent)-8]=0; continue; case mdtMethodDef: szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("method ")); break; case mdtFieldDef: szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("field ")); break; case mdtMemberRef: { PCCOR_SIGNATURE typePtr; const char *pszMemberName; ULONG cComSig; if (FAILED(g_pImport->GetNameAndSigOfMemberRef( tk, &typePtr, &cComSig, &pszMemberName))) { szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"ERROR "); break; } unsigned callConv = CorSigUncompressData(typePtr); if (isCallConv(callConv, IMAGE_CEE_CS_CALLCONV_FIELD)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("field ")); else szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("method ")); break; } } g_NumTypedefs = 0; PrettyPrintToken(szString, tk, g_pImport,g_pFile,0); g_NumTypedefs = n; szptr = &szString[strlen(szString)]; } szptr+= sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," %s %s", KEYWORD("as"), ProperName((*g_typedefs)[i].szName)); printLine(GUICookie,szString); } } BOOL PrintClassList() { DWORD i; BOOL fSuccess = FALSE; //char szString[1024]; char* szptr; if(g_NumClasses) { printLine(g_pFile,COMMENT("// Classes defined in this module:")); printLine(g_pFile,COMMENT("//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")); for (i = 0; i < g_NumClasses; i++) { const char *pszClassName; const char *pszNamespace; DWORD dwClassAttrs; mdTypeRef crExtends; if (FAILED(g_pImport->GetNameOfTypeDef( g_cl_list[i], &pszClassName, &pszNamespace))) { printLine(g_pFile, COMMENT("// Invalid TypeDef record")); return FALSE; } MAKE_NAME_IF_NONE(pszClassName,g_cl_list[i]); // if this is the "<Module>" class (there is a misnomer) then skip it! 
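            // The type's flags are decoded below into a one-line summary comment, e.g. (hypothetical):
            //     // Class      MyType                         (public) (auto) (ansi) (sealed)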
if (FAILED(g_pImport->GetTypeDefProps( g_cl_list[i], &dwClassAttrs, &crExtends))) { printLine(g_pFile, COMMENT("// Invalid TypeDef record")); return FALSE; } szptr = &szString[0]; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"// "); if (IsTdInterface(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"Interface "); //else if (IsTdValueType(dwClassAttrs)) szptr+=sprintf(szptr,"Value Class"); //else if (IsTdUnmanagedValueType(dwClassAttrs)) szptr+=sprintf(szptr,"NotInGCHeap Value Class"); else szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"Class "); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%-30s ", pszClassName); if (IsTdPublic(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(public) "); if (IsTdAbstract(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(abstract) "); if (IsTdAutoLayout(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(auto) "); if (IsTdSequentialLayout(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(sequential) "); if (IsTdExplicitLayout(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(explicit) "); if (IsTdAnsiClass(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(ansi) "); if (IsTdUnicodeClass(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(unicode) "); if (IsTdAutoClass(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(autochar) "); if (IsTdImport(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(import) "); if (IsTdWindowsRuntime(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(windowsruntime) "); //if (IsTdEnum(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(enum) "); if (IsTdSealed(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(sealed) "); if (IsTdNestedPublic(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(nested public) "); if (IsTdNestedPrivate(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(nested private) "); if (IsTdNestedFamily(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(nested family) "); if (IsTdNestedAssembly(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(nested assembly) "); if (IsTdNestedFamANDAssem(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(nested famANDassem) "); if (IsTdNestedFamORAssem(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(nested famORassem) "); printLine(g_pFile,COMMENT(szString)); } printLine(g_pFile,COMMENT("//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")); printLine(g_pFile,""); } else printLine(g_pFile,COMMENT("// No classes defined in this module")); fSuccess = TRUE; return fSuccess; } BOOL ValidateToken(mdToken tk, ULONG type = (ULONG) ~0) { BOOL bRtn; //char szString[1024]; bRtn = g_pImport->IsValidToken(tk); if (!bRtn) { sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_INVALIDTK), tk); printError(g_pFile,szString); } else if (type != (ULONG) ~0 && TypeFromToken(tk) != type) { sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_UNEXPTYPE), TypeFromToken(type), TypeFromToken(tk)); printError(g_pFile,szString); bRtn = FALSE; } return bRtn; } BOOL DumpModule(mdModuleRef mdMod) { const char *pszModName; //char szString[1024]; if (FAILED(g_pImport->GetModuleRefProps(mdMod,&pszModName))) { pszModName = "Invalid ModuleRef record"; } MAKE_NAME_IF_NONE(pszModName,mdMod); 
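    // Emits the module reference as, e.g. (hypothetical name):  .import "other.netmodule"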
sprintf_s(szString,SZSTRING_SIZE,"%s%s \"%s\"",g_szAsmCodeIndent,KEYWORD(".import"),pszModName); // what about GUID and MVID? printLine(g_pFile,szString); return TRUE; } char* DumpPinvokeMap(DWORD dwMappingFlags, const char *szImportName, mdModuleRef mrImportDLL, __inout __nullterminated char* szString, void* GUICookie) { const char *szImportDLLName; char* szptr = &szString[strlen(szString)]; if (FAILED(g_pImport->GetModuleRefProps(mrImportDLL,&szImportDLLName))) { szImportDLLName = "Invalid ModuleRef record"; } if(strlen(szImportDLLName) != 0) { szptr = DumpQString(GUICookie, (char*)szImportDLLName, g_szAsmCodeIndent, 80); } //if(strlen(szImportDLLName)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"\"%s\"",szImportDLLName); //if(szImportName && strlen(szImportName)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," as \"%s\"",szImportName); if(szImportName && strlen(szImportName)) { szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD(" as ")); szptr = DumpQString(GUICookie, (char*)szImportName, g_szAsmCodeIndent, 80); } szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD((char*)0)); if(IsPmNoMangle(dwMappingFlags)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," nomangle"); if(IsPmCharSetAnsi(dwMappingFlags)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," ansi"); if(IsPmCharSetUnicode(dwMappingFlags)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," unicode"); if(IsPmCharSetAuto(dwMappingFlags)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," autochar"); if(IsPmSupportsLastError(dwMappingFlags)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," lasterr"); if(IsPmCallConvWinapi(dwMappingFlags)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," winapi"); if(IsPmCallConvCdecl(dwMappingFlags)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," cdecl"); if(IsPmCallConvThiscall(dwMappingFlags)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," thiscall"); if(IsPmCallConvFastcall(dwMappingFlags)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," fastcall"); if(IsPmCallConvStdcall(dwMappingFlags)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," stdcall"); if(IsPmBestFitEnabled(dwMappingFlags)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," bestfit:on"); if(IsPmBestFitDisabled(dwMappingFlags)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," bestfit:off"); if(IsPmThrowOnUnmappableCharEnabled(dwMappingFlags)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," charmaperror:on"); if(IsPmThrowOnUnmappableCharDisabled(dwMappingFlags)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," charmaperror:off"); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD((char*)-1)); return szptr; } void DumpByteArray(__inout __nullterminated char* szString, const BYTE* pBlob, ULONG ulLen, void* GUICookie) { ULONG32 ulStrOffset = 0; ULONG32 j = 0; ULONG32 k = 0; ULONG32 m = 0; char sz[256]; bool printsz = FALSE; char* szptr = NULL; BYTE byt = 0; ulStrOffset = (ULONG32) strlen(szString); szptr = &szString[ulStrOffset]; if(!pBlob) ulLen = 0; for(j = 0, k=0, m=0; j < ulLen; j++,k++,m++) { if(k == 16) { if(printsz) { szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT(" // %s"),sz); } printLine(GUICookie,szString); strcpy_s(szString,SZSTRING_SIZE,g_szAsmCodeIndent); for(k=(ULONG32) strlen(szString); k < ulStrOffset; k++) szString[k] = ' '; szString[k] = 0; szptr = &szString[ulStrOffset]; k = 0; m = 0; printsz = FALSE; } bool bBreak = FALSE; PAL_CPP_TRY { byt = pBlob[j]; } 
PAL_CPP_CATCH_ALL { strcat_s(szString, SZSTRING_SIZE,ERRORMSG("INVALID DATA ADDRESS")); bBreak = TRUE; } PAL_CPP_ENDTRY; if (bBreak) break; szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%2.2X ",byt); if(isprint(byt)) { if(g_fDumpRTF) { if((byt == '\\')||(byt=='{')||(byt=='}')) sz[m++]='\\'; sz[m] = byt; } else if(g_fDumpHTML) { if(byt == '<') { sz[m] = 0; strcat_s(sz,256-m,LTN()); m+=(ULONG32)(strlen(LTN())); } else if(byt == '>') { sz[m] = 0; strcat_s(sz,256-m,GTN()); m+=(ULONG32)(strlen(GTN())); } else sz[m] = byt; } else sz[m] = byt; printsz = TRUE; } else sz[m] = '.'; sz[m+1] = 0; } szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),") "); if(printsz) { for(j = k; j < 16; j++) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," "); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("// %s"),sz); } } mdToken ResolveTypeDefReflectionNotation(IMDInternalImport *pIMDI, LPCUTF8 szNamespace, __inout LPUTF8 szName, mdToken tkEncloser) { mdToken tk = 0; LPUTF8 pch = strrchr(szName, '+'); if(pch != NULL) { *pch = 0; tkEncloser = ResolveTypeDefReflectionNotation(pIMDI,szNamespace,szName,tkEncloser); szNamespace = ""; szName = pch+1; } if(SUCCEEDED(pIMDI->FindTypeDef(szNamespace,szName,tkEncloser,&tk))) return tk; else return 0; } mdToken ResolveTypeRefReflectionNotation(IMDInternalImport *pIMDI, _In_ __nullterminated const char* szNamespace, __inout __nullterminated char* szName, mdToken tkResScope) { mdToken tk = 0; char* pch = strrchr(szName, '+'); if(pch != NULL) { *pch = 0; tkResScope = ResolveTypeRefReflectionNotation(pIMDI,szNamespace,szName,tkResScope); szNamespace = ""; szName = pch+1; } if(SUCCEEDED(pIMDI->FindTypeRefByName((LPCSTR)szNamespace,(LPCSTR)szName,tkResScope,&tk))) return tk; else return 0; } mdToken ResolveReflectionNotation(BYTE* dataPtr, unsigned Lstr, IMDInternalImport *pIMDI, void* GUICookie) { char* str = new char[Lstr+1]; mdToken ret = 0; if(str) { char szNamespaceDefault[] = ""; char* szNamespace = szNamespaceDefault; char* szName = str; char* szAssembly = NULL; char szAssemblyMscorlib[] = "mscorlib"; char* pch; memcpy(str,dataPtr,Lstr); str[Lstr] = 0; //format: Namespace.Name, Assembly,... 
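        // e.g. "My.Namespace.MyType, SomeAssembly, Version=..." (hypothetical); everything after the
        // second comma is ignored, and when no assembly part is present the local TypeDefs are
        // searched first, with mscorlib used as the fallback resolution scope.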
pch = strchr(str,','); if(pch) { *pch = 0; for(szAssembly = pch+1; *szAssembly == ' '; szAssembly++); pch = strchr(szAssembly,','); if(pch) *pch = 0; } pch = strrchr(str,'.'); if(pch) { *pch = 0; szNamespace = str; szName = pch+1; } if(szAssembly == NULL) { // Look in TypeDefs mdToken tk = ResolveTypeDefReflectionNotation(pIMDI,szNamespace,szName,mdTypeDefNil); if(tk != 0) ret = tk; else // TypeDef not found, try TypeRef from mscorlib szAssembly = szAssemblyMscorlib; } if(szAssembly != NULL) { // Look in TypeRefs // First, identify resolution scope _ASSERTE(*szName); ULONG mAsmRefs = pIMDI->GetCountWithTokenKind(mdtAssemblyRef); if(mAsmRefs) { mdToken tkResScope = 0; mdToken tk=TokenFromRid(mdtAssemblyRef,1), tkmax=TokenFromRid(mdtAssemblyRef,mAsmRefs); LPCSTR szAsmRefName; // these are dummies const void* pPKT, *pHash; ULONG ulPKT,ulHash; AssemblyMetaDataInternal MD; DWORD dwFlags; for (;tk <= tkmax; tk++) { if (FAILED(pIMDI->GetAssemblyRefProps(tk,&pPKT,&ulPKT,&szAsmRefName,&MD,&pHash,&ulHash,&dwFlags))) { continue; } if(0==strcmp(szAsmRefName,szAssembly)) { tkResScope = tk; break; } } if(tkResScope) { ret = ResolveTypeRefReflectionNotation(pIMDI,szNamespace,szName,tkResScope); } } } } VDELETE(str); return ret; } unsigned UnderlyingTypeOfEnumTypeDef(mdToken tk, IMDInternalImport *pIMDI) { // make sure it's a TypeDef if(TypeFromToken(tk) != mdtTypeDef) return 0; // make sure it's an enum mdToken tkParent; DWORD dwAttr; if (FAILED(pIMDI->GetTypeDefProps(tk,&dwAttr,&tkParent))) { return 0; } if(RidFromToken(tkParent)==0) return 0; LPCSTR szName, szNamespace; switch(TypeFromToken(tkParent)) { case mdtTypeDef: if (FAILED(pIMDI->GetNameOfTypeDef(tkParent, &szName, &szNamespace))) { return 0; } break; case mdtTypeRef: if (FAILED(pIMDI->GetNameOfTypeRef(tkParent, &szNamespace, &szName))) { return 0; } break; default: return 0; } if (strcmp(szName,"Enum") != 0 || strcmp(szNamespace,"System") != 0) { // the parent type is not System.Enum so this type has no underlying type return 0; } // OK, it's an enum; find its instance field and get its type HENUMInternal hEnum; mdToken tkField; if (FAILED(pIMDI->EnumInit(mdtFieldDef,tk,&hEnum))) { return 0; } while(pIMDI->EnumNext(&hEnum,&tkField)) { if (FAILED(pIMDI->GetFieldDefProps(tkField, &dwAttr))) { continue; } if (IsFdStatic(dwAttr)) { continue; } PCCOR_SIGNATURE psig; if (FAILED(pIMDI->GetSigOfFieldDef(tkField,(ULONG*)&dwAttr, &psig))) { continue; } pIMDI->EnumClose(&hEnum); return (unsigned) *(psig+1); } // no instance field found -- error! 
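    // (When an instance field is found, the loop above returns the ELEMENT_TYPE_* byte of its
    //  signature -- for a well-formed enum that is the single "value__" field, whose type is
    //  the enum's underlying type.)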
pIMDI->EnumClose(&hEnum); return 0; } mdToken TypeRefToTypeDef(mdToken tk, IMDInternalImport *pIMDI, IMDInternalImport **ppIMDInew) { mdToken tkEncloser = mdTypeDefNil; mdToken tkTypeDef = mdTypeDefNil; *ppIMDInew = NULL; // get the resolution scope of TypeRef mdToken tkRS; if (FAILED(pIMDI->GetResolutionScopeOfTypeRef(tk, &tkRS))) { goto AssignAndReturn; } if (TypeFromToken(tkRS) == mdtTypeRef) tkEncloser = TypeRefToTypeDef(tkRS,pIMDI,ppIMDInew); else if (TypeFromToken(tkRS) == mdtAssemblyRef) { *ppIMDInew = g_asmref_import[RidFromToken(tkRS)]; if (*ppIMDInew == NULL) { // get that assembly and open IMDInternalImport IMetaDataAssemblyImport* pAssemblyImport; if (FAILED(g_pPubImport->QueryInterface(IID_IMetaDataAssemblyImport, (void**) &pAssemblyImport))) goto AssignAndReturn; const void *pPKT, *pHash; ULONG cHash,cName; WCHAR wzName[2048]; ASSEMBLYMETADATA md; WCHAR wzLocale[1024]; DWORD dwFlags; IUnknown* pIAMDI[64]; memset(&md,0,sizeof(ASSEMBLYMETADATA)); md.szLocale = wzLocale; md.cbLocale = 1024; struct Param { IMetaDataAssemblyImport* pAssemblyImport; WCHAR *wzName; IUnknown **pIAMDI; ULONG cPKT; } param; param.pAssemblyImport = pAssemblyImport; param.wzName = wzName; param.pIAMDI = pIAMDI; pAssemblyImport->GetAssemblyRefProps(tkRS,&pPKT,&param.cPKT,wzName,2048,&cName,&md,&pHash,&cHash,&dwFlags); PAL_TRY(Param *, pParam, &param) { if(FAILED(pParam->pAssemblyImport->FindAssembliesByName(NULL,NULL,(LPCWSTR)pParam->wzName,pParam->pIAMDI,64,&pParam->cPKT))) pParam->cPKT=0; } PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER) { param.cPKT=0; } PAL_ENDTRY pAssemblyImport->Release(); if(param.cPKT == 0) goto AssignAndReturn; _ASSERTE(pIAMDI[0] != NULL); IUnknown *pUnk; if(FAILED(pIAMDI[0]->QueryInterface(IID_IUnknown, (void**)&pUnk))) goto AssignAndReturn; if (FAILED(GetMetaDataInternalInterfaceFromPublic( pUnk, IID_IMDInternalImport, (LPVOID *)ppIMDInew))) { goto AssignAndReturn; } _ASSERTE(*ppIMDInew != NULL); g_asmref_import[RidFromToken(tkRS)] = *ppIMDInew; pUnk->Release(); for(cHash=0; cHash<param.cPKT; cHash++) if(pIAMDI[cHash]) pIAMDI[cHash]->Release(); } } if (*ppIMDInew != NULL) { LPCSTR szName, szNamespace; if (FAILED(pIMDI->GetNameOfTypeRef(tk, &szNamespace, &szName))) { tkTypeDef = mdTypeDefNil; goto AssignAndReturn; } if (FAILED((*ppIMDInew)->FindTypeDef(szNamespace,szName,tkEncloser,&tkTypeDef))) { tkTypeDef = mdTypeDefNil; } } AssignAndReturn: return tkTypeDef; } unsigned UnderlyingTypeOfEnum(mdToken tk, IMDInternalImport *pIMDI) { unsigned uRet = 0; unsigned ix = RidFromToken(tk); if(TypeFromToken(tk)==mdtTypeDef) { if(g_enum_td_type[ix] == 0xFF) { g_enum_td_type[ix] = (BYTE)UnderlyingTypeOfEnumTypeDef(tk,pIMDI); } return (unsigned)g_enum_td_type[ix]; } else if(TypeFromToken(tk)==mdtTypeRef) { if(g_enum_tr_type[ix] == 0xFF) { IMDInternalImport *pIMDInew = NULL; mdToken tkTypeDef = TypeRefToTypeDef(tk,pIMDI, &pIMDInew); if((RidFromToken(tkTypeDef)!=0)&&(pIMDInew != NULL)) { uRet = UnderlyingTypeOfEnumTypeDef(tkTypeDef,pIMDInew); } g_enum_tr_type[ix] = (BYTE)uRet; } return (unsigned)g_enum_tr_type[ix]; } else return 0; } /**************************************************************************/ /* move 'ptr past the exactly one type description */ BYTE* skipType(BYTE* ptr) { mdToken tk; AGAIN: switch(*ptr++) { case ELEMENT_TYPE_VOID : case ELEMENT_TYPE_BOOLEAN : case ELEMENT_TYPE_CHAR : case ELEMENT_TYPE_I1 : case ELEMENT_TYPE_U1 : case ELEMENT_TYPE_I2 : case ELEMENT_TYPE_U2 : case ELEMENT_TYPE_I4 : case ELEMENT_TYPE_U4 : case ELEMENT_TYPE_I8 : case ELEMENT_TYPE_U8 : case ELEMENT_TYPE_R4 
: case ELEMENT_TYPE_R8 : case ELEMENT_TYPE_U : case ELEMENT_TYPE_I : case ELEMENT_TYPE_STRING : case ELEMENT_TYPE_OBJECT : case ELEMENT_TYPE_TYPEDBYREF : case ELEMENT_TYPE_SENTINEL : case SERIALIZATION_TYPE_TYPE : case SERIALIZATION_TYPE_TAGGED_OBJECT : /* do nothing */ break; case SERIALIZATION_TYPE_ENUM : { unsigned Lstr = CorSigUncompressData((PCCOR_SIGNATURE&)ptr); ptr += Lstr; break; } case ELEMENT_TYPE_VALUETYPE : case ELEMENT_TYPE_CLASS : ptr += CorSigUncompressToken(ptr, &tk); break; case ELEMENT_TYPE_CMOD_REQD : case ELEMENT_TYPE_CMOD_OPT : ptr += CorSigUncompressToken(ptr, &tk); goto AGAIN; case ELEMENT_TYPE_ARRAY : { ptr = skipType(ptr); // element Type unsigned rank = CorSigUncompressData((PCCOR_SIGNATURE&) ptr); if (rank != 0) { unsigned numSizes = CorSigUncompressData((PCCOR_SIGNATURE&) ptr); while(numSizes > 0) { CorSigUncompressData((PCCOR_SIGNATURE&) ptr); --numSizes; } unsigned numLowBounds = CorSigUncompressData((PCCOR_SIGNATURE&) ptr); while(numLowBounds > 0) { CorSigUncompressData((PCCOR_SIGNATURE&) ptr); --numLowBounds; } } } break; // Modifiers or depedant types case ELEMENT_TYPE_PINNED : case ELEMENT_TYPE_PTR : case ELEMENT_TYPE_BYREF : case ELEMENT_TYPE_SZARRAY : // tail recursion optimization // ptr = skipType(ptr, fFixupType); // break goto AGAIN; case ELEMENT_TYPE_VAR: case ELEMENT_TYPE_MVAR: CorSigUncompressData((PCCOR_SIGNATURE&) ptr); // bound break; case ELEMENT_TYPE_FNPTR: { CorSigUncompressData((PCCOR_SIGNATURE&) ptr); // calling convention unsigned argCnt = CorSigUncompressData((PCCOR_SIGNATURE&) ptr); // arg count ptr = skipType(ptr); // return type while(argCnt > 0) { ptr = skipType(ptr); --argCnt; } } break; case ELEMENT_TYPE_GENERICINST: { ptr = skipType(ptr); // type constructor unsigned argCnt = CorSigUncompressData((PCCOR_SIGNATURE&)ptr); // arg count while(argCnt > 0) { ptr = skipType(ptr); --argCnt; } } break; default: case ELEMENT_TYPE_END : _ASSERTE(!"Unknown Type"); break; } return(ptr); } #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable:21000) // Suppress PREFast warning about overly large function #endif BYTE* PrettyPrintCABlobValue(PCCOR_SIGNATURE &typePtr, BYTE* dataPtr, BYTE* dataEnd, CQuickBytes* out, IMDInternalImport *pIMDI, void* GUICookie) { char str[64]; char appendix[64]; int typ; BOOL Reiterate; BOOL CloseParenthesis; unsigned numElements = 1; unsigned n,Lstr; unsigned underType; mdToken tk; appendix[0] = 0; do { if(dataPtr >= dataEnd) { _ASSERTE(!"CA blob too short"); return FALSE; } Reiterate = FALSE; CloseParenthesis = TRUE; switch(typ = *typePtr++) { case ELEMENT_TYPE_VOID : return NULL; case ELEMENT_TYPE_BOOLEAN : appendStr(out,KEYWORD("bool")); appendStr(out,appendix); appendStr(out,"("); for(n=0; n < numElements; n++) { if(n) appendStr(out," "); appendStr(out,(*dataPtr)? 
KEYWORD("true"):KEYWORD("false")); dataPtr++; } break; case ELEMENT_TYPE_CHAR : appendStr(out,KEYWORD("char")); appendStr(out,appendix); appendStr(out,"("); for(n=0; n < numElements; n++) { if(n) appendStr(out," "); sprintf_s(str,64,"0x%4.4X",(WORD)GET_UNALIGNED_VAL16(dataPtr)); appendStr(out,str); dataPtr += 2; } break; case ELEMENT_TYPE_I1 : appendStr(out,KEYWORD("int8")); appendStr(out,appendix); appendStr(out,"("); for(n=0; n < numElements; n++) { if(n) appendStr(out," "); sprintf_s(str,64,"%d",*((char*)dataPtr)); appendStr(out,str); dataPtr ++; } break; case ELEMENT_TYPE_U1 : appendStr(out,KEYWORD("uint8")); appendStr(out,appendix); appendStr(out,"("); for(n=0; n < numElements; n++) { if(n) appendStr(out," "); sprintf_s(str,64,"%d",*dataPtr); appendStr(out,str); dataPtr ++; } break; case ELEMENT_TYPE_I2 : appendStr(out,KEYWORD("int16")); appendStr(out,appendix); appendStr(out,"("); for(n=0; n < numElements; n++) { if(n) appendStr(out," "); sprintf_s(str,64,"%d",GET_UNALIGNED_VAL16(dataPtr)); appendStr(out,str); dataPtr +=2; } break; case ELEMENT_TYPE_U2 : appendStr(out,KEYWORD("uint16")); appendStr(out,appendix); appendStr(out,"("); for(n=0; n < numElements; n++) { if(n) appendStr(out," "); sprintf_s(str,64,"%d",(WORD)GET_UNALIGNED_VAL16(dataPtr)); appendStr(out,str); dataPtr +=2; } break; case ELEMENT_TYPE_I4 : appendStr(out,KEYWORD("int32")); appendStr(out,appendix); appendStr(out,"("); for(n=0; n < numElements; n++) { if(n) appendStr(out," "); sprintf_s(str,64,"%d",GET_UNALIGNED_VAL32(dataPtr)); appendStr(out,str); dataPtr +=4; } break; case ELEMENT_TYPE_U4 : appendStr(out,KEYWORD("uint32")); appendStr(out,appendix); appendStr(out,"("); for(n=0; n < numElements; n++) { if(n) appendStr(out," "); sprintf_s(str,64,"%d",(unsigned)GET_UNALIGNED_VAL32(dataPtr)); appendStr(out,str); dataPtr +=4; } break; case ELEMENT_TYPE_I8 : appendStr(out,KEYWORD("int64")); appendStr(out,appendix); appendStr(out,"("); for(n=0; n < numElements; n++) { if(n) appendStr(out," "); sprintf_s(str,64,"%I64d",GET_UNALIGNED_VAL64(dataPtr)); appendStr(out,str); dataPtr +=8; } break; case ELEMENT_TYPE_U8 : appendStr(out,KEYWORD("uint64")); appendStr(out,appendix); appendStr(out,"("); for(n=0; n < numElements; n++) { if(n) appendStr(out," "); sprintf_s(str,64,"%I64d",(ULONGLONG)GET_UNALIGNED_VAL64(dataPtr)); appendStr(out,str); dataPtr +=8; } break; case ELEMENT_TYPE_R4 : appendStr(out,KEYWORD("float32")); appendStr(out,appendix); appendStr(out,"("); for(n=0; n < numElements; n++) { if(n) appendStr(out," "); _gcvt_s(str,64,*((float*)dataPtr), 8); float df = (float)atof(str); // Must compare as underlying bytes, not floating point otherwise optmizier will // try to enregister and comapre 80-bit precision number with 32-bit precision number!!!! if((*(ULONG*)&df != (ULONG)GET_UNALIGNED_VAL32(dataPtr))||IsSpecialNumber(str)) sprintf_s(str, 64,"0x%08X",(ULONG)GET_UNALIGNED_VAL32(dataPtr)); appendStr(out,str); dataPtr +=4; } break; case ELEMENT_TYPE_R8 : appendStr(out,KEYWORD("float64")); appendStr(out,appendix); appendStr(out,"("); for(n=0; n < numElements; n++) { if(n) appendStr(out," "); char *pch; _gcvt_s(str,64,*((double*)dataPtr), 17); double df = strtod(str, &pch); // Must compare as underlying bytes, not floating point otherwise optmizier will // try to enregister and comapre 80-bit precision number with 64-bit precision number!!!! 
if((*(ULONGLONG*)&df != (ULONGLONG)GET_UNALIGNED_VAL64(dataPtr))||IsSpecialNumber(str)) sprintf_s(str, 64, "0x%I64X",(ULONGLONG)GET_UNALIGNED_VAL64(dataPtr)); appendStr(out,str); dataPtr +=8; } break; case ELEMENT_TYPE_U : case ELEMENT_TYPE_I : return NULL; case ELEMENT_TYPE_OBJECT : case SERIALIZATION_TYPE_TAGGED_OBJECT: appendStr(out,KEYWORD("object")); appendStr(out,appendix); appendStr(out,"("); for(n=0; n < numElements; n++) { BYTE* dataPtr1 = skipType(dataPtr); if(n) appendStr(out," "); dataPtr = PrettyPrintCABlobValue((PCCOR_SIGNATURE&)dataPtr, dataPtr1, dataEnd, out, pIMDI,GUICookie); if (dataPtr == NULL) return NULL; } break; case ELEMENT_TYPE_STRING : appendStr(out,KEYWORD("string")); appendStr(out,appendix); appendStr(out,"("); for(n=0; n < numElements; n++) { if(n) appendStr(out," "); if(*dataPtr == 0xFF) { appendStr(out,KEYWORD("nullref")); Lstr = 1; } else { appendStr(out,"'"); Lstr = CorSigUncompressData((PCCOR_SIGNATURE&)dataPtr); if(dataPtr + Lstr > dataEnd) return NULL; appendStr(out,UnquotedProperName((char*)dataPtr,Lstr)); appendStr(out,"'"); } dataPtr += Lstr; } break; case ELEMENT_TYPE_CLASS : typePtr += CorSigUncompressToken(typePtr, &tk); //skip the following token FALLTHROUGH; case SERIALIZATION_TYPE_TYPE : appendStr(out,KEYWORD("type")); appendStr(out,appendix); appendStr(out,"("); for(n=0; n < numElements; n++) { if(n) appendStr(out," "); if(*dataPtr == 0xFF) { appendStr(out,KEYWORD("nullref")); Lstr = 1; } else { Lstr = CorSigUncompressData((PCCOR_SIGNATURE&)dataPtr); if(dataPtr + Lstr > dataEnd) return NULL; tk = ResolveReflectionNotation(dataPtr,Lstr,pIMDI,GUICookie); if(IsNilToken(tk)) { appendStr(out,KEYWORD("class ")); appendStr(out,"'"); appendStr(out,UnquotedProperName((char*)dataPtr,Lstr)); appendStr(out,"'"); } else { PrettyPrintClass(out, tk, pIMDI); } } dataPtr += Lstr; } break; case ELEMENT_TYPE_VALUETYPE : typePtr += CorSigUncompressToken(typePtr, &tk); _ASSERTE(pIMDI->IsValidToken(tk)); goto GetUTSize; case SERIALIZATION_TYPE_ENUM : Lstr = CorSigUncompressData((PCCOR_SIGNATURE&)typePtr); tk = ResolveReflectionNotation((BYTE*)typePtr,Lstr,pIMDI,GUICookie); /* if(IsNilToken(tk)) { _ASSERTE(!"Failed to resolve Reflection notation for S_T_ENUM"); return NULL; } */ typePtr += Lstr; GetUTSize: underType = UnderlyingTypeOfEnum(tk, pIMDI); if(underType == 0) { // try to figure out the underlying type by its size switch(dataEnd - dataPtr) { case 1: // bool underType = ELEMENT_TYPE_BOOLEAN; break; case 2: // int16 underType = ELEMENT_TYPE_I2; break; case 4: // int32 underType = ELEMENT_TYPE_I4; break; case 8: // int64 underType = ELEMENT_TYPE_I8; break; default: return NULL; } //_ASSERTE(!"Failed to find underlying type for S_T_ENUM"); } { PCCOR_SIGNATURE ps = (PCCOR_SIGNATURE)&underType; dataPtr = PrettyPrintCABlobValue(ps, dataPtr, dataEnd, out, pIMDI,GUICookie); } CloseParenthesis = FALSE; break; case ELEMENT_TYPE_SZARRAY : numElements *= (unsigned)GET_UNALIGNED_VAL32(dataPtr); Reiterate = TRUE; sprintf_s(appendix,64,"[%d]",numElements); if(numElements == 0xFFFFFFFF) numElements = 0; dataPtr += 4; break; case ELEMENT_TYPE_ARRAY : case ELEMENT_TYPE_VAR : case ELEMENT_TYPE_MVAR : case ELEMENT_TYPE_FNPTR : case ELEMENT_TYPE_GENERICINST : case ELEMENT_TYPE_TYPEDBYREF : #ifdef LOGGING case ELEMENT_TYPE_INTERNAL : #endif // LOGGING return NULL; // Modifiers or depedent types case ELEMENT_TYPE_CMOD_OPT : case ELEMENT_TYPE_CMOD_REQD : case ELEMENT_TYPE_PINNED : Reiterate = TRUE; break; case ELEMENT_TYPE_PTR : case ELEMENT_TYPE_BYREF : return NULL; default: case 
ELEMENT_TYPE_SENTINEL : case ELEMENT_TYPE_END : _ASSERTE(!"Unknown Type"); return NULL; } // end switch } while(Reiterate); if(CloseParenthesis) appendStr(out,")"); return dataPtr; } #ifdef _PREFAST_ #pragma warning(pop) #endif BOOL PrettyPrintCustomAttributeNVPairs(unsigned nPairs, BYTE* dataPtr, BYTE* dataEnd, CQuickBytes* out, void* GUICookie) { IMDInternalImport *pIMDI = g_pImport; // ptr to IMDInternalImport class with ComSig while(dataPtr < dataEnd) { // field or property? switch(*dataPtr) { case SERIALIZATION_TYPE_FIELD: appendStr(out,KEYWORD("field ")); break; case SERIALIZATION_TYPE_PROPERTY: appendStr(out,KEYWORD("property ")); break; default: _ASSERTE(!"Invalid code of name/val pair in CA blob"); return FALSE; } dataPtr++; if(dataPtr >= dataEnd) { _ASSERTE(!"CA blob too short"); return FALSE; } // type of the field/property PCCOR_SIGNATURE dataTypePtr = (PCCOR_SIGNATURE)dataPtr; const char* szAppend = ""; if(*dataPtr == ELEMENT_TYPE_SZARRAY) // Only SZARRAY modifier can occur in ser.type { szAppend = "[]"; dataPtr++; } if(*dataPtr == SERIALIZATION_TYPE_TYPE) { appendStr(out,KEYWORD("type")); dataPtr++; } else if(*dataPtr == SERIALIZATION_TYPE_TAGGED_OBJECT) { appendStr(out,KEYWORD("object")); dataPtr++; } else if(*dataPtr == SERIALIZATION_TYPE_ENUM) { appendStr(out,KEYWORD("enum ")); dataPtr++; unsigned Lstr = CorSigUncompressData((PCCOR_SIGNATURE&)dataPtr); if(dataPtr + Lstr > dataEnd) return FALSE; mdToken tk = ResolveReflectionNotation(dataPtr,Lstr,pIMDI,GUICookie); if(IsNilToken(tk)) { appendStr(out,KEYWORD("class ")); appendStr(out,"'"); appendStr(out,UnquotedProperName((char*)dataPtr,Lstr)); appendStr(out,"'"); } else { PrettyPrintClass(out, tk, pIMDI); } dataPtr += Lstr; } else { szAppend = ""; dataPtr = (BYTE*)PrettyPrintType(dataTypePtr, out, pIMDI); } if(*szAppend != 0) appendStr(out,szAppend); if(dataPtr >= dataEnd) { _ASSERTE(!"CA blob too short"); return FALSE; } // name of the field/property unsigned Lstr = CorSigUncompressData((PCCOR_SIGNATURE&)dataPtr); if(dataPtr + Lstr > dataEnd) return FALSE; appendStr(out," '"); appendStr(out,UnquotedProperName((char*)dataPtr,Lstr)); appendStr(out,"' = "); dataPtr += Lstr; if(dataPtr >= dataEnd) { _ASSERTE(!"CA blob too short"); return FALSE; } // value of the field/property dataPtr = PrettyPrintCABlobValue(dataTypePtr, dataPtr, dataEnd, out, pIMDI,GUICookie); if(NULL == dataPtr) return FALSE; appendStr(out,"\n"); nPairs--; } _ASSERTE(nPairs == 0); return TRUE; } BOOL PrettyPrintCustomAttributeBlob(mdToken tkType, BYTE* pBlob, ULONG ulLen, void* GUICookie, __inout __nullterminated char* szString) { char* initszptr = szString + strlen(szString); PCCOR_SIGNATURE typePtr; // type to convert, ULONG typeLen; // the lenght of 'typePtr' CHECK_LOCAL_STATIC_VAR(static CQuickBytes out); // where to put the pretty printed string IMDInternalImport *pIMDI = g_pImport; // ptr to IMDInternalImport class with ComSig unsigned numArgs = 0; unsigned numTyArgs = 0; PCCOR_SIGNATURE typeEnd; unsigned callConv; BYTE* dataPtr = pBlob; BYTE* dataEnd = dataPtr + ulLen; WORD wNumNVPairs = 0; unsigned numElements = 0; if(TypeFromToken(tkType) == mdtMemberRef) { const char *szName_Ignore; if (FAILED(pIMDI->GetNameAndSigOfMemberRef(tkType,&typePtr,&typeLen, &szName_Ignore))) { return FALSE; } } else if(TypeFromToken(tkType) == mdtMethodDef) { if (FAILED(pIMDI->GetSigOfMethodDef(tkType, &typeLen, &typePtr))) { return FALSE; } } else return FALSE; typeEnd = typePtr + typeLen; callConv = CorSigUncompressData(typePtr); if (callConv & 
IMAGE_CEE_CS_CALLCONV_GENERIC) { numTyArgs = CorSigUncompressData(typePtr); return FALSE; // leave generic instantiations for later } numElements = numArgs = CorSigUncompressData(typePtr); out.Shrink(0); if (!isCallConv(callConv, IMAGE_CEE_CS_CALLCONV_GENERICINST)) { // skip return type typePtr = PrettyPrintType(typePtr, &out, pIMDI); out.Shrink(0); } appendStr(&out," = {"); dataPtr += 2; // skip blob prolog 0x0001 // dump the arguments while(typePtr < typeEnd) { if (*typePtr == ELEMENT_TYPE_SENTINEL) { typePtr++; } else { if (numArgs <= 0) break; dataPtr = PrettyPrintCABlobValue(typePtr, dataPtr, dataEnd-2, &out, pIMDI,GUICookie); if(NULL == dataPtr) return FALSE; appendStr(&out,"\n"); --numArgs; } } _ASSERTE(numArgs == 0); wNumNVPairs = (WORD)GET_UNALIGNED_VAL16(dataPtr); dataPtr+=2; numElements += wNumNVPairs; // arguments done, now to field/property name-val pairs if(!PrettyPrintCustomAttributeNVPairs((unsigned) wNumNVPairs, dataPtr, dataEnd, &out, GUICookie)) return FALSE; { char* sz = asString(&out); char* ch = sz; char* szbl; while((ch = strchr(ch,'\n'))) { *ch = 0; ch++; } // if the string is too long already, begin on next line if((initszptr - szString) > 80) { printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"%s ",g_szAsmCodeIndent); initszptr = &szString[strlen(szString)]; } sprintf_s(initszptr,SZSTRING_REMAINING_SIZE(initszptr), "%s", sz); initszptr += 4; // to compensate for " = {" szbl = szString + strlen(g_szAsmCodeIndent); for(unsigned n = 1; n < numElements; n++) { printLine(GUICookie, szString); sz = sz + strlen(sz) + 1; for(ch = szbl; ch < initszptr; ch++) *ch = ' '; sprintf_s(initszptr,SZSTRING_REMAINING_SIZE(initszptr), "%s", sz); } } strcat_s(initszptr, SZSTRING_REMAINING_SIZE(initszptr),"}"); if(g_fShowBytes) { printLine(GUICookie,szString); strcat_s(g_szAsmCodeIndent,MAX_MEMBER_LENGTH," // "); sprintf_s(szString,SZSTRING_SIZE,"%s = ( ",g_szAsmCodeIndent); DumpByteArray(szString,pBlob,ulLen,GUICookie); g_szAsmCodeIndent[strlen(g_szAsmCodeIndent)-8] = 0; } return TRUE; } void DumpCustomAttributeProps(mdToken tkCA, mdToken tkType, mdToken tkOwner, BYTE* pBlob, ULONG ulLen, void *GUICookie, bool bWithOwner) { char* szptr = &szString[0]; BOOL fCommentItOut = FALSE; if((TypeFromToken(tkType) == mdtMemberRef)||(TypeFromToken(tkType) == mdtMethodDef)) { mdToken tkParent; const char * pszClassName = NULL; const char * pszNamespace = NULL; if (TypeFromToken(tkType) == mdtMemberRef) { if (FAILED(g_pImport->GetParentOfMemberRef(tkType, &tkParent))) { szptr += sprintf_s(szptr, SZSTRING_REMAINING_SIZE(szptr), "Invalid MemberRef %08X record ", tkType); return; } } else { if (FAILED(g_pImport->GetParentToken(tkType, &tkParent))) { szptr += sprintf_s(szptr, SZSTRING_REMAINING_SIZE(szptr), "Invalid token %08X ", tkType); return; } } REGISTER_REF(tkOwner,tkType); // owner of the CA references the class amd method REGISTER_REF(tkOwner,tkParent); if (TypeFromToken(tkParent) == mdtTypeDef) { if (FAILED(g_pImport->GetNameOfTypeDef(tkParent, &pszClassName, &pszNamespace))) { szptr += sprintf_s(szptr, SZSTRING_REMAINING_SIZE(szptr), "Invalid TypeDef %08X record ", tkParent); return; } } else if (TypeFromToken(tkParent) == mdtTypeRef) { if (FAILED(g_pImport->GetNameOfTypeRef(tkParent, &pszNamespace, &pszClassName))) { szptr += sprintf_s(szptr, SZSTRING_REMAINING_SIZE(szptr), "Invalid TypeRef %08X record ", tkParent); return; } } if(pszClassName && pszNamespace && (strcmp(pszNamespace,"System.Diagnostics") == 0) && (strcmp(pszClassName,"DebuggableAttribute") == 0)) 
fCommentItOut = TRUE; } if(fCommentItOut) { printLine(GUICookie,COMMENT((char*)0)); // start multiline comment sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_AUTOCA),g_szAsmCodeIndent); printLine(GUICookie, szString); strcat_s(g_szAsmCodeIndent,MAX_MEMBER_LENGTH,"// "); } szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,KEYWORD(".custom")); if(bWithOwner) { if(g_fDumpTokens) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("/*%08X*/ "),tkCA); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"("); switch(TypeFromToken(tkOwner)) { case mdtTypeDef : case mdtTypeRef : case mdtTypeSpec: PrettyPrintToken(szString, tkOwner, g_pImport,GUICookie,0); break; case mdtMemberRef: { PCCOR_SIGNATURE typePtr; const char* pszMemberName; ULONG cComSig; if (FAILED(g_pImport->GetNameAndSigOfMemberRef( tkOwner, &typePtr, &cComSig, &pszMemberName))) { szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"ERROR "); break; } unsigned callConv = CorSigUncompressData(typePtr); if (isCallConv(callConv, IMAGE_CEE_CS_CALLCONV_FIELD)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("field ")); else szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("method ")); PrettyPrintToken(szString, tkOwner, g_pImport,GUICookie,0); } break; case mdtMethodDef: szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr), KEYWORD("method ")); PrettyPrintToken(szString, tkOwner, g_pImport,GUICookie,0); break; default : strcat_s(szptr, SZSTRING_REMAINING_SIZE(szptr),ERRORMSG("UNKNOWN_OWNER")); break; } szptr = &szString[strlen(szString)]; if(g_fDumpTokens) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("/*%08X*/ "),tkOwner); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),") "); } else { if(g_fDumpTokens) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("/*%08X:%08X*/ "),tkCA,tkType); } switch(TypeFromToken(tkType)) { case mdtTypeDef : case mdtTypeRef : case mdtMemberRef: case mdtMethodDef: PrettyPrintToken(szString, tkType, g_pImport,GUICookie,0); break; default : strcat_s(szString, SZSTRING_SIZE,ERRORMSG("UNNAMED_CUSTOM_ATTR")); break; } szptr = &szString[strlen(szString)]; if(pBlob && ulLen) { if(!g_fCAVerbal || !PrettyPrintCustomAttributeBlob(tkType, pBlob, ulLen, GUICookie, szString)) { sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = ( "); DumpByteArray(szString,pBlob,ulLen,GUICookie); } } printLine(GUICookie, szString); if(fCommentItOut) { g_szAsmCodeIndent[strlen(g_szAsmCodeIndent)-4] = 0; printLine(GUICookie,COMMENT((char*)-1)); // end multiline comment } } void DumpCustomAttribute(mdCustomAttribute tkCA, void *GUICookie, bool bWithOwner) { mdToken tkType; BYTE* pBlob=NULL; ULONG ulLen=0; mdToken tkOwner; static mdToken tkMod = 0xFFFFFFFF; _ASSERTE((TypeFromToken(tkCA)==mdtCustomAttribute)&&(RidFromToken(tkCA)>0)); _ASSERTE(RidFromToken(tkCA) <= g_uNCA); if(tkMod == 0xFFFFFFFF) tkMod = g_pImport->GetModuleFromScope(); // can't use InternalImport here: need the tkOwner if (FAILED(g_pPubImport->GetCustomAttributeProps( // S_OK or error. tkCA, // [IN] CustomValue token. &tkOwner, // [OUT, OPTIONAL] Object token. &tkType, // [OUT, OPTIONAL] Put TypeDef/TypeRef token here. (const void **)&pBlob, // [OUT, OPTIONAL] Put pointer to data here. &ulLen))) // [OUT, OPTIONAL] Put size of date here. 
{ return; } if(!RidFromToken(tkOwner)) return; DWORD i; for(i = 0; i < g_NumTypedefs; i++) { TypeDefDescr* pTDD = &((*g_typedefs)[i]); if(TypeFromToken(pTDD->tkTypeSpec) == mdtCustomAttribute) { mdToken tkTypeTD; mdToken tkOwnerTD; BYTE* pBlobTD=NULL; ULONG uLenTD=0; tkTypeTD = GET_UNALIGNED_VAL32(pTDD->psig); if(tkTypeTD != tkType) continue; tkOwnerTD = GET_UNALIGNED_VAL32(pTDD->psig + sizeof(mdToken)); if(pTDD->cb > 2*sizeof(mdToken)) { pBlobTD = (BYTE*)pTDD->psig + 2*sizeof(mdToken); uLenTD = pTDD->cb - 2*sizeof(mdToken); } if(uLenTD != ulLen) continue; if(memcmp(pBlobTD,pBlob,ulLen) != 0) continue; char* szptr = &szString[0]; szString[0] = 0; szptr += sprintf_s(szString,SZSTRING_SIZE,"%s%s", g_szAsmCodeIndent,JUMPPT(ProperName(pTDD->szName),pTDD->tkSelf)); if(g_fDumpTokens) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("/*%08X*/ "),tkCA); printLine(GUICookie,szString); break; } } if(i >= g_NumTypedefs) DumpCustomAttributeProps(tkCA,tkType,tkOwner,pBlob,ulLen,GUICookie,bWithOwner); _ASSERTE(g_rchCA); _ASSERTE(RidFromToken(tkCA) <= g_uNCA); g_rchCA[RidFromToken(tkCA)] = 1; } void DumpCustomAttributes(mdToken tkOwner, void *GUICookie) { if (g_fShowCA) { HENUMInternal hEnum; mdCustomAttribute tkCA; if (FAILED(g_pImport->EnumInit(mdtCustomAttribute, tkOwner,&hEnum))) { return; } while(g_pImport->EnumNext(&hEnum,&tkCA) && RidFromToken(tkCA)) { DumpCustomAttribute(tkCA,GUICookie,false); } g_pImport->EnumClose( &hEnum); } } void DumpDefaultValue(mdToken tok, __inout __nullterminated char* szString, void* GUICookie) { MDDefaultValue MDDV; char* szptr = &szString[strlen(szString)]; if (FAILED(g_pImport->GetDefaultValue(tok, &MDDV))) { szptr += sprintf_s(szptr, SZSTRING_REMAINING_SIZE(szptr), ERRORMSG(" /* Invalid default value for %08X: */"), tok); return; } switch(MDDV.m_bType) { case ELEMENT_TYPE_VOID: strcat_s(szString, SZSTRING_SIZE," /* NO CORRESPONDING RECORD IN CONSTANTS TABLE */"); break; case ELEMENT_TYPE_I1: szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = %s(0x%02X)",KEYWORD("int8"),MDDV.m_byteValue); break; case ELEMENT_TYPE_U1: szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = %s(0x%02X)",KEYWORD("uint8"),MDDV.m_byteValue); break; case ELEMENT_TYPE_I2: szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = %s(0x%04X)",KEYWORD("int16"),MDDV.m_usValue); break; case ELEMENT_TYPE_U2: szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = %s(0x%04X)",KEYWORD("uint16"),MDDV.m_usValue); break; case ELEMENT_TYPE_I4: szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = %s(0x%08X)",KEYWORD("int32"),MDDV.m_ulValue); break; case ELEMENT_TYPE_U4: szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = %s(0x%08X)",KEYWORD("uint32"),MDDV.m_ulValue); break; case ELEMENT_TYPE_CHAR: szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = %s(0x%04X)",KEYWORD("char"),MDDV.m_usValue); break; case ELEMENT_TYPE_BOOLEAN: szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = %s",KEYWORD("bool")); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(%s)", KEYWORD((char *)(MDDV.m_byteValue ? 
"true" : "false"))); break; case ELEMENT_TYPE_I8: szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = %s(0x%I64X)",KEYWORD("int64"),MDDV.m_ullValue); break; case ELEMENT_TYPE_U8: szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = %s(0x%I64X)",KEYWORD("uint64"),MDDV.m_ullValue); break; case ELEMENT_TYPE_R4: { char szf[32]; _gcvt_s(szf,32,MDDV.m_fltValue, 8); float df = (float)atof(szf); // Must compare as underlying bytes, not floating point otherwise optmizier will // try to enregister and comapre 80-bit precision number with 32-bit precision number!!!! if((*(ULONG*)&df == MDDV.m_ulValue)&&!IsSpecialNumber(szf)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = %s(%s)",KEYWORD("float32"),szf); else szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr), " = %s(0x%08X)",KEYWORD("float32"),MDDV.m_ulValue); } break; case ELEMENT_TYPE_R8: { char szf[32], *pch; _gcvt_s(szf,32,MDDV.m_dblValue, 17); double df = strtod(szf, &pch); //atof(szf); szf[31]=0; // Must compare as underlying bytes, not floating point otherwise optmizier will // try to enregister and comapre 80-bit precision number with 64-bit precision number!!!! if((*(ULONGLONG*)&df == MDDV.m_ullValue)&&!IsSpecialNumber(szf)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = %s(%s)",KEYWORD("float64"),szf); else szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr), " = %s(0x%I64X) // %s",KEYWORD("float64"),MDDV.m_ullValue,szf); } break; case ELEMENT_TYPE_STRING: szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = "); PAL_CPP_TRY { szptr = DumpUnicodeString(GUICookie,szString,(WCHAR*)MDDV.m_wzValue,MDDV.m_cbSize/sizeof(WCHAR)); } PAL_CPP_CATCH_ALL { strcat_s(szString, SZSTRING_SIZE,ERRORMSG("INVALID DATA ADDRESS")); } PAL_CPP_ENDTRY; break; case ELEMENT_TYPE_CLASS: if(MDDV.m_wzValue==NULL) { szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = %s",KEYWORD("nullref")); break; } //else fall thru to default case, to report the error FALLTHROUGH; default: szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),ERRORMSG(" /* ILLEGAL CONSTANT type:0x%02X, size:%d bytes, blob: "),MDDV.m_bType,MDDV.m_cbSize); if(MDDV.m_wzValue) { szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"("); PAL_CPP_TRY { DumpByteArray(szString,(BYTE*)MDDV.m_wzValue,MDDV.m_cbSize,GUICookie); } PAL_CPP_CATCH_ALL { szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),ERRORMSG(" Invalid blob at 0x%08X)"), MDDV.m_wzValue); } PAL_CPP_ENDTRY } else { szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"NULL"); } strcat_s(szString, SZSTRING_REMAINING_SIZE(szptr), " */"); break; } } void DumpParams(ParamDescriptor* pPD, ULONG ulParams, void* GUICookie) { if(pPD) { for(ULONG i = ulParams; i<2*ulParams+1; i++) // pPD[ulParams] is return value { ULONG j = i % (ulParams+1); if(RidFromToken(pPD[j].tok)) { HENUMInternal hEnum; mdCustomAttribute tkCA; ULONG ulCAs= 0; if(g_fShowCA) { if (FAILED(g_pImport->EnumInit(mdtCustomAttribute, pPD[j].tok, &hEnum))) { sprintf_s(szString, SZSTRING_SIZE, "%sERROR: MetaData error enumerating CustomAttribute for %08X", g_szAsmCodeIndent, pPD[j].tok); printLine(GUICookie, szString); continue; } ulCAs = g_pImport->EnumGetCount(&hEnum); } if(ulCAs || IsPdHasDefault(pPD[j].attr)) { char *szptr = &szString[0]; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s [%d]",g_szAsmCodeIndent,KEYWORD(".param"),i-ulParams); if(g_fDumpTokens) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("/*%08X*/ "),pPD[j].tok); if(IsPdHasDefault(pPD[j].attr)) DumpDefaultValue(pPD[j].tok, szString, GUICookie); 
printLine(GUICookie, szString); if(ulCAs) { while(g_pImport->EnumNext(&hEnum,&tkCA) && RidFromToken(tkCA)) { DumpCustomAttribute(tkCA,GUICookie,false); } } } if(g_fShowCA) g_pImport->EnumClose( &hEnum); } } } } BOOL DumpPermissionSetBlob(void* GUICookie,__inout __nullterminated char* szString, BYTE* pvPermission, ULONG cbPermission) { if(*pvPermission == '.') { CQuickBytes out; pvPermission++; char* szptr_init = &szString[strlen(szString)]; char* szptr = szptr_init; appendStr(&out," = {"); unsigned nAttrs = CorSigUncompressData((PCCOR_SIGNATURE&)pvPermission); for(unsigned iAttr = 0; iAttr < nAttrs; iAttr++) { unsigned L = CorSigUncompressData((PCCOR_SIGNATURE&)pvPermission); // class name length mdToken tkAttr = ResolveReflectionNotation(pvPermission,L,g_pImport,GUICookie); if(IsNilToken(tkAttr)) { appendStr(&out,KEYWORD("class ")); appendStr(&out,"'"); appendStr(&out,UnquotedProperName((char*)pvPermission,L)); appendStr(&out,"'"); } else { PrettyPrintClass(&out, tkAttr, g_pImport); } pvPermission += L; appendStr(&out," = {"); // dump blob L = CorSigUncompressData((PCCOR_SIGNATURE&)pvPermission); // blob length if(L > 0) { BYTE* pvEnd = pvPermission+L; L = CorSigUncompressData((PCCOR_SIGNATURE&)pvPermission); // number of props if(L > 0) { if(!PrettyPrintCustomAttributeNVPairs(L, pvPermission, pvEnd, &out, GUICookie)) return FALSE; out.Shrink(out.Size()-1); } pvPermission = pvEnd; } appendStr(&out, iAttr == nAttrs-1 ? "}" : "}, "); } appendStr(&out, "}"); char* sz = asString(&out); while(char* pc = strstr(sz,"}, ")) { *(pc+2) = 0; strcpy_s(szptr,SZSTRING_REMAINING_SIZE(szptr), sz); printLine(GUICookie,szString); sz = pc+3; if(szptr == szptr_init) szptr += 4; // to compensate for = { for(pc = szString; pc < szptr; pc++) *pc = ' '; } strcpy_s(szptr, SZSTRING_REMAINING_SIZE(szptr),sz); return TRUE; } return FALSE; } void DumpPermissions(mdToken tkOwner, void* GUICookie) { HCORENUM hEnum = NULL; static mdPermission rPerm[16384]; ULONG count; HRESULT hr; //static char szString[4096]; // can't use internal import here: EnumInit not impl. 
// for mdtPermission
    while (SUCCEEDED(hr = g_pPubImport->EnumPermissionSets( &hEnum,
        tkOwner, 0, rPerm, 16384, &count)) && count > 0)
    {
        for (ULONG i = 0; i < count; i++)
        {
            DWORD dwAction;
            const BYTE *pvPermission=NULL;
            ULONG cbPermission=0;
            const char *szAction;
            char *szptr;
            szptr = &szString[0];
            if(SUCCEEDED(hr = g_pPubImport->GetPermissionSetProps( rPerm[i], &dwAction,
                (const void**)&pvPermission, &cbPermission)))
            {
                szptr += sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,KEYWORD(".permissionset"));
                switch(dwAction)
                {
                    case dclActionNil:          szAction = ""; break;
                    case dclRequest:            szAction = KEYWORD("request"); break;
                    case dclDemand:             szAction = KEYWORD("demand"); break;
                    case dclAssert:             szAction = KEYWORD("assert"); break;
                    case dclDeny:               szAction = KEYWORD("deny"); break;
                    case dclPermitOnly:         szAction = KEYWORD("permitonly"); break;
                    case dclLinktimeCheck:      szAction = KEYWORD("linkcheck"); break;
                    case dclInheritanceCheck:   szAction = KEYWORD("inheritcheck"); break;
                    case dclRequestMinimum:     szAction = KEYWORD("reqmin"); break;
                    case dclRequestOptional:    szAction = KEYWORD("reqopt"); break;
                    case dclRequestRefuse:      szAction = KEYWORD("reqrefuse"); break;
                    case dclPrejitGrant:        szAction = KEYWORD("prejitgrant"); break;
                    case dclPrejitDenied:       szAction = KEYWORD("prejitdeny"); break;
                    case dclNonCasDemand:       szAction = KEYWORD("noncasdemand"); break;
                    case dclNonCasLinkDemand:   szAction = KEYWORD("noncaslinkdemand"); break;
                    case dclNonCasInheritance:  szAction = KEYWORD("noncasinheritance"); break;
                    default:                    szAction = ERRORMSG("<UNKNOWN_ACTION>"); break;
                }
                szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),szAction);
                if(pvPermission && cbPermission)
                {
                    printLine(GUICookie, szString);
                    sprintf_s(szString,SZSTRING_SIZE,"%s ",g_szAsmCodeIndent);
                    if(!DumpPermissionSetBlob(GUICookie,szString,(BYTE*)pvPermission,cbPermission))
                    {
                        strcat_s(szString,SZSTRING_SIZE,KEYWORD("bytearray"));
                        strcat_s(szString,SZSTRING_SIZE," (");
                        DumpByteArray(szString, pvPermission, cbPermission, GUICookie);
                    }
                    printLine(GUICookie,szString);
                }
                else // i.e.
if pvPermission == NULL or cbPermission == NULL { sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = ()"); printLine(GUICookie,szString); } DumpCustomAttributes(rPerm[i],GUICookie); }// end if(GetPermissionProps) } // end for(all permissions) }//end while(EnumPermissionSets) g_pPubImport->CloseEnum( hEnum); } void PrettyPrintMethodSig(__inout __nullterminated char* szString, unsigned* puStringLen, CQuickBytes* pqbMemberSig, PCCOR_SIGNATURE pComSig, ULONG cComSig, __inout __nullterminated char* buff, _In_opt_z_ char* szArgPrefix, void* GUICookie) { unsigned uMaxWidth = 40; if(g_fDumpHTML || g_fDumpRTF) uMaxWidth = 240; if(*buff && (strlen(szString) > (size_t)uMaxWidth)) { printLine(GUICookie,szString); strcpy_s(szString,SZSTRING_SIZE,g_szAsmCodeIndent); strcat_s(szString,SZSTRING_SIZE," "); // to align with ".method " } appendStr(pqbMemberSig, szString); { char* pszTailSig = (char*)PrettyPrintSig(pComSig, cComSig, buff, pqbMemberSig, g_pImport, szArgPrefix); if(*buff) { size_t L = strlen(pszTailSig); char* newbuff = new char[strlen(buff)+3]; sprintf_s(newbuff,strlen(buff)+3," %s(", buff); char* pszOffset = strstr(pszTailSig,newbuff); if(pszOffset) { char* pszTailSigRemainder = new char[L+1]; if(pszOffset - pszTailSig > (int)uMaxWidth) { char* pszOffset2 = strstr(pszTailSig," marshal("); if(pszOffset2 && (pszOffset2 < pszOffset)) { *pszOffset2 = 0; strcpy_s(pszTailSigRemainder,L,pszOffset2+1); printLine(GUICookie,pszTailSig); strcpy_s(pszTailSig,L,g_szAsmCodeIndent); strcat_s(pszTailSig,L," "); // to align with ".method " strcat_s(pszTailSig,L,pszTailSigRemainder); pszOffset = strstr(pszTailSig,newbuff); } *pszOffset = 0 ; strcpy_s(pszTailSigRemainder,L,pszOffset+1); printLine(GUICookie,pszTailSig); strcpy_s(pszTailSig,L,g_szAsmCodeIndent); strcat_s(pszTailSig,L," "); // to align with ".method " strcat_s(pszTailSig,L,pszTailSigRemainder); pszOffset = strstr(pszTailSig,newbuff); } size_t i, j, k, l, indent = pszOffset - pszTailSig + strlen(buff) + 2; char chAfterComma; char *pComma = pszTailSig+strlen(buff), *pch; while((pComma = strchr(pComma,','))) { for(pch = pszTailSig, i=0, j = 0, k=0, l=0; pch < pComma; pch++) { if(*pch == '\\') pch++; else { if(*pch == '\'') j=1-j; else if(*pch == '\"') k=1-k; else if(j==0) { if(*pch == '[') i++; else if(*pch == ']') i--; else if(strncmp(pch,LTN(),strlen(LTN()))==0) l++; else if(strncmp(pch,GTN(),strlen(GTN()))==0) l--; } } } pComma++; if((i==0)&&(j==0)&&(k==0)&&(l==0))// no brackets/quotes or all opened/closed { chAfterComma = *pComma; strcpy_s(pszTailSigRemainder,L,pComma); *pComma = 0; printLine(GUICookie,pszTailSig); *pComma = chAfterComma; for(i=0; i<indent; i++) pszTailSig[i] = ' '; strcpy_s(&pszTailSig[indent],L-indent,pszTailSigRemainder); pComma = pszTailSig; } } if(*puStringLen < (unsigned)strlen(pszTailSig)+128) { //free(szString); *puStringLen = (unsigned)strlen(pszTailSig)+128; // need additional space for "il managed" etc. 
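// Only the required buffer length is recorded here; the reallocation itself is left commented out.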
//szString = (char*)malloc(*puStringLen); } VDELETE(pszTailSigRemainder); } strcpy_s(szString,SZSTRING_SIZE,pszTailSig); VDELETE(newbuff); } else // it's for GUI, don't split it into several lines { size_t L = strlen(szString); if(L < 2048) { L = 2048-L; strncpy_s(szString,SZSTRING_SIZE,pszTailSig,L); } } } } // helper to avoid mixing of SEH and stack objects with destructors BOOL DisassembleWrapper(IMDInternalImport *pImport, BYTE *ILHeader, void *GUICookie, mdToken FuncToken, ParamDescriptor* pszArgname, ULONG ulArgs) { BOOL fRet = FALSE; //char szString[4096]; PAL_CPP_TRY { fRet = Disassemble(pImport, ILHeader, GUICookie, FuncToken, pszArgname, ulArgs); } PAL_CPP_CATCH_ALL { sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_DASMERR),g_szAsmCodeIndent); printLine(GUICookie, szString); } PAL_CPP_ENDTRY return fRet; } BOOL PrettyPrintGP( // prints name of generic param, or returns FALSE mdToken tkOwner, // Class, method or 0 CQuickBytes *out, // where to put the pretty printed generic param int n) // Index of generic param { BOOL ret = FALSE; if(tkOwner && ((TypeFromToken(tkOwner)==mdtTypeDef)||(TypeFromToken(tkOwner)==mdtMethodDef))) { DWORD NumTyPars; HENUMInternal hEnumTyPar; if(SUCCEEDED(g_pImport->EnumInit(mdtGenericParam,tkOwner,&hEnumTyPar))) { NumTyPars = g_pImport->EnumGetCount(&hEnumTyPar); if(NumTyPars > (DWORD)n) { // need this for name dup check LPCSTR *pszName = new LPCSTR[NumTyPars]; if(pszName != NULL) { ULONG ulSequence; DWORD ix,nx; mdToken tk; for(ix = 0, nx = 0xFFFFFFFF; ix < NumTyPars; ix++) { if(g_pImport->EnumNext(&hEnumTyPar,&tk)) { if(SUCCEEDED(g_pImport->GetGenericParamProps(tk,&ulSequence,NULL,NULL,NULL,&pszName[ix]))) { if(ulSequence == (ULONG)n) nx = ix; } } } // if there are dup names, use !0 or !!0 if(nx != 0xFFFFFFFF) { for(ix = 0; ix < nx; ix++) { if(strcmp(pszName[ix],pszName[nx]) == 0) break; } if(ix >= nx) { for(ix = nx+1; ix < NumTyPars; ix++) { if(strcmp(pszName[ix],pszName[nx]) == 0) break; } if(ix >= NumTyPars) { appendStr(out, ProperName((char*)(pszName[nx]))); ret = TRUE; } } } // end if(tkTyPar != 0) delete [] pszName; } // end if(pszName != NULL) } // end if(NumTyPars > (DWORD)n) } // end if(SUCCEEDED(g_pImport->EnumInit(mdtGenericParam,tkOwner,&hEnumTyPar))) g_pImport->EnumClose(&hEnumTyPar); } // end if(tkOwner) return ret; } // Pretty-print formal type parameters for a class or method char *DumpGenericPars(_Inout_updates_(SZSTRING_SIZE) char* szString, mdToken tok, void* GUICookie/*=NULL*/, BOOL fSplit/*=FALSE*/) { WCHAR *wzArgName = wzUniBuf; ULONG chName; mdToken tkConstr[2048]; DWORD NumTyPars; DWORD NumConstrs; mdGenericParam tkTyPar; ULONG ulSequence; DWORD attr; mdToken tkOwner; HCORENUM hEnumTyPar = NULL; HCORENUM hEnumTyParConstr = NULL; char* szptr = &szString[strlen(szString)]; char* szbegin; unsigned i; if (FAILED(g_pPubImport->EnumGenericParams(&hEnumTyPar, tok, &tkTyPar, 1, &NumTyPars))) return NULL; if (NumTyPars > 0) { szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),LTN()); szbegin = szptr; for (i = 1; NumTyPars != 0; i++) { g_pPubImport->GetGenericParamProps(tkTyPar, &ulSequence, &attr, &tkOwner, NULL, wzArgName, UNIBUF_SIZE/2, &chName); //if(wcslen(wzArgName) >= MAX_CLASSNAME_LENGTH) // wzArgName[MAX_CLASSNAME_LENGTH-1] = 0; hEnumTyParConstr = NULL; if (FAILED(g_pPubImport->EnumGenericParamConstraints(&hEnumTyParConstr, tkTyPar, tkConstr, 2048, &NumConstrs))) { g_pPubImport->CloseEnum(hEnumTyPar); return NULL; } *szptr = 0; CHECK_REMAINING_SIZE; switch (attr & gpVarianceMask) { case gpCovariant : szptr += 
sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr), "+ "); break; case gpContravariant : szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr), "- "); break; } CHECK_REMAINING_SIZE; if ((attr & gpReferenceTypeConstraint) != 0) szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr), "class "); CHECK_REMAINING_SIZE; if ((attr & gpNotNullableValueTypeConstraint) != 0) szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr), "valuetype "); CHECK_REMAINING_SIZE; if ((attr & gpDefaultConstructorConstraint) != 0) szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr), ".ctor "); CHECK_REMAINING_SIZE; if (NumConstrs) { CQuickBytes out; mdToken tkConstrType,tkOwner; szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"("); DWORD ix; for (ix=0; ix<NumConstrs; ix++) { if (FAILED(g_pPubImport->GetGenericParamConstraintProps(tkConstr[ix], &tkOwner, &tkConstrType))) return NULL; if(ix) szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),", "); CHECK_REMAINING_SIZE; out.Shrink(0); szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s",PrettyPrintClass(&out,tkConstrType,g_pImport)); CHECK_REMAINING_SIZE; } if(ix < NumConstrs) break; szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),") "); CHECK_REMAINING_SIZE; } // re-get name, wzUniBuf may not contain it any more g_pPubImport->GetGenericParamProps(tkTyPar, NULL, &attr, NULL, NULL, wzArgName, UNIBUF_SIZE/2, &chName); //if(wcslen(wzArgName) >= MAX_CLASSNAME_LENGTH) // wzArgName[MAX_CLASSNAME_LENGTH-1] = 0; if (chName) { char* sz = (char*)(&wzUniBuf[UNIBUF_SIZE/2]); WszWideCharToMultiByte(CP_UTF8,0,wzArgName,-1,sz,UNIBUF_SIZE,NULL,NULL); szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s",ProperName(sz)); } CHECK_REMAINING_SIZE; if (FAILED(g_pPubImport->EnumGenericParams(&hEnumTyPar, tok, &tkTyPar, 1, &NumTyPars))) return NULL; if (NumTyPars != 0) { *szptr++ = ','; if(fSplit && (i == 4)) { *szptr = 0; printLine(GUICookie,szString); i = 0; // mind i++ at the end of the loop for(szptr = szString; szptr < szbegin; szptr++) *szptr = ' '; } } } // end for (i = 1; NumTyPars != 0; i++) if(NumTyPars != 0) // all type parameters can't fit in szString, error { strcpy_s(szptr,4,"..."); szptr += 3; } else szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),GTN()); } // end if (NumTyPars > 0) *szptr = 0; if(hEnumTyPar) g_pPubImport->CloseEnum(hEnumTyPar); return szptr; } void DumpGenericParsCA(mdToken tok, void* GUICookie/*=NULL*/) { DWORD NumTyPars; mdGenericParam tkTyPar; HCORENUM hEnumTyPar = NULL; unsigned i; WCHAR *wzArgName = wzUniBuf; ULONG chName; DWORD attr; if(g_fShowCA) { for(i=0; SUCCEEDED(g_pPubImport->EnumGenericParams(&hEnumTyPar, tok, &tkTyPar, 1, &NumTyPars)) &&(NumTyPars > 0); i++) { HENUMInternal hEnum; mdCustomAttribute tkCA; ULONG ulCAs= 0; if (FAILED(g_pImport->EnumInit(mdtCustomAttribute, tkTyPar, &hEnum))) { sprintf_s(szString, SZSTRING_SIZE, "%sERROR: MetaData error enumerating CustomAttribute for %08X", g_szAsmCodeIndent, tkTyPar); printLine(GUICookie, szString); return; } ulCAs = g_pImport->EnumGetCount(&hEnum); if(ulCAs) { char *szptr = &szString[0]; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,KEYWORD(".param type")); if(SUCCEEDED(g_pPubImport->GetGenericParamProps(tkTyPar, NULL, &attr, NULL, NULL, wzArgName, UNIBUF_SIZE/2, &chName)) &&(chName > 0)) { //if(wcslen(wzArgName) >= MAX_CLASSNAME_LENGTH) // wzArgName[MAX_CLASSNAME_LENGTH-1] = 0; char* sz = (char*)(&wzUniBuf[UNIBUF_SIZE/2]); WszWideCharToMultiByte(CP_UTF8,0,wzArgName,-1,sz,UNIBUF_SIZE,NULL,NULL); szptr += 
sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s ",ProperName(sz)); } else szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"[%d] ",i+1); if(g_fDumpTokens) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("/*%08X*/ "),tkTyPar); printLine(GUICookie, szString); strcat_s(g_szAsmCodeIndent, MAX_MEMBER_LENGTH, " "); while(g_pImport->EnumNext(&hEnum,&tkCA) && RidFromToken(tkCA)) { DumpCustomAttribute(tkCA,GUICookie,false); } g_szAsmCodeIndent[strlen(g_szAsmCodeIndent) - 2] = 0; } g_pImport->EnumClose( &hEnum); // mdtCustomAttribute ULONG ulSequence; DWORD attr; mdToken tkOwner; HCORENUM hEnumTyParConstraint; mdToken tkConstraint[2048]; DWORD NumConstraints; g_pPubImport->GetGenericParamProps(tkTyPar, &ulSequence, &attr, &tkOwner, NULL, wzArgName, UNIBUF_SIZE / 2, &chName); hEnumTyParConstraint = NULL; if (FAILED(g_pPubImport->EnumGenericParamConstraints(&hEnumTyParConstraint, tkTyPar, tkConstraint, 2048, &NumConstraints))) { g_pPubImport->CloseEnum(hEnumTyPar); return; } if (NumConstraints > 0) { CQuickBytes out; mdToken tkConstraintType; mdToken tkGenericParam; ULONG ulSequence; for (DWORD ix = 0; ix < NumConstraints; ix++) { mdGenericParamConstraint tkParamConstraint = tkConstraint[ix]; if (FAILED(g_pPubImport->GetGenericParamConstraintProps(tkParamConstraint, &tkGenericParam, &tkConstraintType))) { sprintf_s(szString, SZSTRING_SIZE, "%sERROR: MetaData error in GetGenericParamConstraintProps for %08X", g_szAsmCodeIndent, tkParamConstraint); return; } if (FAILED(g_pImport->EnumInit(mdtCustomAttribute, tkParamConstraint, &hEnum))) { sprintf_s(szString, SZSTRING_SIZE, "%sERROR: MetaData error enumerating CustomAttribute for mdGenericParamConstraint %08X", g_szAsmCodeIndent, tkParamConstraint); printLine(GUICookie, szString); return; } ulCAs = g_pImport->EnumGetCount(&hEnum); if (ulCAs) { char *szptr = &szString[0]; szptr += sprintf_s(szptr, SZSTRING_SIZE, "%s%s ", g_szAsmCodeIndent, KEYWORD(".param constraint")); if (FAILED(g_pPubImport->GetGenericParamProps(tkGenericParam, &ulSequence, &attr, NULL, NULL, wzArgName, UNIBUF_SIZE / 2, &chName))) { sprintf_s(szString, SZSTRING_SIZE, "%sERROR: MetaData error in GetGenericParamProps for %08X", g_szAsmCodeIndent, tkGenericParam); printLine(GUICookie, szString); return; } if (chName > 0) { char* sz = (char*)(&wzUniBuf[UNIBUF_SIZE / 2]); WszWideCharToMultiByte(CP_UTF8, 0, wzArgName, -1, sz, UNIBUF_SIZE, NULL, NULL); szptr += sprintf_s(szptr, SZSTRING_REMAINING_SIZE(szptr), " %s", ProperName(sz)); } else { szptr += sprintf_s(szptr, SZSTRING_REMAINING_SIZE(szptr), " [%d]", ulSequence + 1); } if (g_fDumpTokens) { szptr += sprintf_s(szptr, SZSTRING_REMAINING_SIZE(szptr), COMMENT("/*%08X*/ "), tkGenericParam); } szptr += sprintf_s(szptr, SZSTRING_REMAINING_SIZE(szptr), ", "); out.Shrink(0); szptr += sprintf_s(szptr, SZSTRING_REMAINING_SIZE(szptr), "%s", PrettyPrintClass(&out, tkConstraintType, g_pImport)); printLine(GUICookie, szString); strcat_s(g_szAsmCodeIndent, MAX_MEMBER_LENGTH, " "); while (g_pImport->EnumNext(&hEnum, &tkCA) && RidFromToken(tkCA)) { DumpCustomAttribute(tkCA, GUICookie, false); } g_szAsmCodeIndent[strlen(g_szAsmCodeIndent) - 2] = 0; } g_pImport->EnumClose(&hEnum); // mdtCustomAttribute } } } //end for(i=0;... } //end if(g_fShowCA) } // Sets *pbOverridingTypeSpec to TRUE if we are overriding a method declared by a type spec or // if the method has a signature which does not exactly match between the overrider and overridee. // That case is commonly caused by covariant overrides. 
// In that case the syntax is slightly different (there are additional 'method' keywords). // Refer to Expert .NET 2.0 IL Assembler page 242. void PrettyPrintOverrideDecl(ULONG i, __inout __nullterminated char* szString, void* GUICookie, mdToken tkOverrider, BOOL *pbOverridingTypeSpec) { const char * pszMemberName; mdToken tkDecl,tkDeclParent=0; char szBadToken[256]; char pszTailSigDefault[] = ""; char* pszTailSig = pszTailSigDefault; CQuickBytes qbInstSig; char* szptr = &szString[0]; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,KEYWORD(".override")); tkDecl = (*g_pmi_list)[i].tkDecl; *pbOverridingTypeSpec = FALSE; if(g_pImport->IsValidToken(tkDecl)) { bool needsFullTokenPrint = false; bool hasTkDeclParent = false; // Determine if the decl is a typespec method, in which case the "method" syntax + full token print // must be used to generate the disassembly. if(SUCCEEDED(g_pImport->GetParentToken(tkDecl,&tkDeclParent))) { if(g_pImport->IsValidToken(tkDeclParent)) { if(TypeFromToken(tkDeclParent) == mdtMethodDef) //get the parent's parent { mdTypeRef cr1; if(FAILED(g_pImport->GetParentToken(tkDeclParent,&cr1))) cr1 = mdTypeRefNil; tkDeclParent = cr1; } if(RidFromToken(tkDeclParent)) { if(TypeFromToken(tkDeclParent)==mdtTypeSpec) { needsFullTokenPrint = true; } hasTkDeclParent = true; } } else szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s",ERRORMSG("INVALID OVERRIDDEN METHOD'S PARENT TOKEN")); } // Determine if the sig of the decl does not match the sig of the body // In that case the full "method" syntax must be used if ((TypeFromToken(tkOverrider) == mdtMethodDef) && !needsFullTokenPrint) { PCCOR_SIGNATURE pComSigDecl = NULL; ULONG cComSigDecl = 0; mdToken tkDeclSigTok = tkDecl; bool successfullyGotDeclSig = false; bool successfullyGotBodySig = false; if (TypeFromToken(tkDeclSigTok) == mdtMethodSpec) { mdToken meth=0; if (SUCCEEDED(g_pImport->GetMethodSpecProps(tkDeclSigTok, &meth, NULL, NULL))) { tkDeclSigTok = meth; } } if (TypeFromToken(tkDeclSigTok) == mdtMethodDef) { if (SUCCEEDED(g_pImport->GetSigOfMethodDef(tkDeclSigTok, &cComSigDecl, &pComSigDecl))) { successfullyGotDeclSig = true; } } else if (TypeFromToken(tkDeclSigTok) == mdtMemberRef) { const char *pszMemberNameUnused; if (SUCCEEDED(g_pImport->GetNameAndSigOfMemberRef( tkDeclSigTok, &pComSigDecl, &cComSigDecl, &pszMemberNameUnused))) { successfullyGotDeclSig = true; } } PCCOR_SIGNATURE pComSigBody; ULONG cComSigBody; if (SUCCEEDED(g_pImport->GetSigOfMethodDef(tkOverrider, &cComSigBody, &pComSigBody))) { successfullyGotBodySig = true; } if (successfullyGotDeclSig && successfullyGotBodySig) { if (cComSigBody != cComSigDecl) { needsFullTokenPrint = true; } else if (memcmp(pComSigBody, pComSigDecl, cComSigBody) != 0) { needsFullTokenPrint = true; } // Signature are binary identical, full sig printing not needed } else { szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s",ERRORMSG("INVALID BODY OR DECL SIG")); } } if (needsFullTokenPrint) { // In this case, the shortcut syntax cannot be used, and a full token must be printed. // Print the full token and return. szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr), " %s ",KEYWORD("method")); PrettyPrintToken(szString,tkDecl,g_pImport,GUICookie,tkOverrider); *pbOverridingTypeSpec = TRUE; return; } if (hasTkDeclParent) { // If the tkDeclParent was successfully retrieved during parent discovery print it here. 
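// Short form: the declaring type is printed first, followed by '::' and the overridden member's name.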
PrettyPrintToken(szString, tkDeclParent, g_pImport,GUICookie,tkOverrider); strcat_s(szString, SZSTRING_SIZE,"::"); szptr = &szString[strlen(szString)]; } if(TypeFromToken(tkDecl) == mdtMethodSpec) { mdToken meth=0; PCCOR_SIGNATURE pSig=NULL; ULONG cSig=0; if (FAILED(g_pImport->GetMethodSpecProps(tkDecl, &meth, &pSig, &cSig))) { meth = mdTokenNil; pSig = NULL; cSig = 0; } if (pSig && cSig) { qbInstSig.Shrink(0); pszTailSig = (char*)PrettyPrintSig(pSig, cSig, "", &qbInstSig, g_pImport, NULL); } tkDecl = meth; } if(TypeFromToken(tkDecl) == mdtMethodDef) { if (FAILED(g_pImport->GetNameOfMethodDef(tkDecl, &pszMemberName))) { sprintf_s(szBadToken,256,ERRORMSG("INVALID RECORD: 0x%8.8X"),tkDecl); pszMemberName = (const char *)szBadToken; } } else if(TypeFromToken(tkDecl) == mdtMemberRef) { PCCOR_SIGNATURE pComSig; ULONG cComSig; if (FAILED(g_pImport->GetNameAndSigOfMemberRef( tkDecl, &pComSig, &cComSig, &pszMemberName))) { sprintf_s(szBadToken,256,ERRORMSG("INVALID RECORD: 0x%8.8X"),tkDecl); pszMemberName = (const char *)szBadToken; } } else { sprintf_s(szBadToken,256,ERRORMSG("INVALID TOKEN: 0x%8.8X"),tkDecl); pszMemberName = (const char*)szBadToken; } MAKE_NAME_IF_NONE(pszMemberName,tkDecl); } else { sprintf_s(szBadToken,256,ERRORMSG("INVALID TOKEN: 0x%8.8X"),tkDecl); pszMemberName = (const char*)szBadToken; } szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s%s",ProperName((char*)pszMemberName),pszTailSig); if(g_fDumpTokens) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT(" /*%08X::%08X*/ "),tkDeclParent,(*g_pmi_list)[i].tkDecl); } #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable:21000) // Suppress PREFast warning about overly large function #endif BOOL DumpMethod(mdToken FuncToken, const char *pszClassName, DWORD dwEntryPointToken,void *GUICookie,BOOL DumpBody) { const char *pszMemberName = NULL;//[MAX_MEMBER_LENGTH]; const char *pszMemberSig = NULL; DWORD dwAttrs = 0; DWORD dwImplAttrs; DWORD dwOffset; DWORD dwTargetRVA; CQuickBytes qbMemberSig; PCCOR_SIGNATURE pComSig = NULL; ULONG cComSig; char *buff = NULL;//[MAX_MEMBER_LENGTH]; ParamDescriptor* pszArgname = NULL; ULONG ulArgs=0; unsigned retParamIx = 0; unsigned uStringLen = SZSTRING_SIZE; char szArgPrefix[MAX_PREFIX_SIZE]; char* szptr = NULL; mdToken tkMVarOwner = g_tkMVarOwner; if (FAILED(g_pImport->GetMethodDefProps(FuncToken, &dwAttrs))) { sprintf_s(szString, SZSTRING_SIZE, "%sERROR: MethodDef %08X has wrong record", g_szAsmCodeIndent, FuncToken); printError(GUICookie, ERRORMSG(szString)); return FALSE; } if (g_fLimitedVisibility) { if(g_fHidePub && IsMdPublic(dwAttrs)) return FALSE; if(g_fHidePriv && IsMdPrivate(dwAttrs)) return FALSE; if(g_fHideFam && IsMdFamily(dwAttrs)) return FALSE; if(g_fHideAsm && IsMdAssem(dwAttrs)) return FALSE; if(g_fHideFOA && IsMdFamORAssem(dwAttrs)) return FALSE; if(g_fHideFAA && IsMdFamANDAssem(dwAttrs)) return FALSE; if(g_fHidePrivScope && IsMdPrivateScope(dwAttrs)) return FALSE; } if (FAILED(g_pImport->GetMethodImplProps(FuncToken, &dwOffset, &dwImplAttrs))) { sprintf_s(szString, SZSTRING_SIZE, "%sERROR: Invalid MethodImpl %08X record", g_szAsmCodeIndent, FuncToken); printError(GUICookie, ERRORMSG(szString)); return FALSE; } if (FAILED(g_pImport->GetNameOfMethodDef(FuncToken, &pszMemberName))) { sprintf_s(szString, SZSTRING_SIZE, "%sERROR: MethodDef %08X has wrong record", g_szAsmCodeIndent, FuncToken); printError(GUICookie, ERRORMSG(szString)); return FALSE; } MAKE_NAME_IF_NONE(pszMemberName,FuncToken); if (FAILED(g_pImport->GetSigOfMethodDef(FuncToken, &cComSig, 
&pComSig))) { pComSig = NULL; } if (cComSig == NULL) { sprintf_s(szString, SZSTRING_SIZE, "%sERROR: method '%s' has no signature", g_szAsmCodeIndent, pszMemberName); printError(GUICookie, ERRORMSG(szString)); return FALSE; } bool bRet = FALSE; PAL_CPP_TRY { g_tkMVarOwner = FuncToken; szString[0] = 0; DumpGenericPars(szString,FuncToken); //,NULL,FALSE); pszMemberSig = PrettyPrintSig(pComSig, cComSig, szString, &qbMemberSig, g_pImport,NULL); } PAL_CPP_CATCH_ALL { printError(GUICookie,"INVALID DATA ADDRESS"); bRet = TRUE; } PAL_CPP_ENDTRY; if (bRet) { g_tkMVarOwner = tkMVarOwner; return FALSE; } if (g_Mode == MODE_DUMP_CLASS_METHOD || g_Mode == MODE_DUMP_CLASS_METHOD_SIG) { if (strcmp(pszMemberName, g_pszMethodToDump) != 0) return FALSE; if (g_Mode == MODE_DUMP_CLASS_METHOD_SIG) { // we want plain signature without token values const char *pszPlainSig; if (g_fDumpTokens) { // temporarily disable token dumping g_fDumpTokens = FALSE; PAL_CPP_TRY { CQuickBytes qbTempSig; pszPlainSig = PrettyPrintSig(pComSig, cComSig, "", &qbTempSig, g_pImport, NULL); } PAL_CPP_CATCH_ALL { pszPlainSig = ""; } PAL_CPP_ENDTRY; g_fDumpTokens = TRUE; } else { pszPlainSig = pszMemberSig; } if (strcmp(pszPlainSig, g_pszSigToDump) != 0) return FALSE; } } if(!DumpBody) { printLine(GUICookie,(char*)pszMemberSig); g_tkMVarOwner = tkMVarOwner; return TRUE; } szptr = &szString[0]; szString[0] = 0; if(DumpBody) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s%s ",g_szAsmCodeIndent,ANCHORPT(KEYWORD(".method"),FuncToken)); else szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s ",ANCHORPT(KEYWORD(".method"),FuncToken)); if(g_fDumpTokens) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("/*%08X*/ "),FuncToken); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD((char*)0)); if(IsMdPublic(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"public "); if(IsMdPrivate(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"private "); if(IsMdFamily(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"family "); if(IsMdAssem(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"assembly "); if(IsMdFamANDAssem(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"famandassem "); if(IsMdFamORAssem(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"famorassem "); if(IsMdPrivateScope(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"privatescope "); if(IsMdHideBySig(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"hidebysig "); if(IsMdNewSlot(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"newslot "); if(IsMdSpecialName(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"specialname "); if(IsMdRTSpecialName(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"rtspecialname "); if (IsMdStatic(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"static "); if (IsMdAbstract(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"abstract "); if (dwAttrs & 0x00000200) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"strict "); if (IsMdVirtual(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"virtual "); if (IsMdFinal(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"final "); if (IsMdUnmanagedExport(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"unmanagedexp "); if(IsMdRequireSecObject(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"reqsecobj "); 
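// Close the keyword span opened with KEYWORD((char*)0) before the accessibility/flag keywords above.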
szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD((char*)-1)); if (IsMdPinvokeImpl(dwAttrs)) { DWORD dwMappingFlags; const char *szImportName; mdModuleRef mrImportDLL; szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s(",KEYWORD("pinvokeimpl")); if(FAILED(g_pImport->GetPinvokeMap(FuncToken,&dwMappingFlags, &szImportName,&mrImportDLL))) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("/* No map */")); else szptr=DumpPinvokeMap(dwMappingFlags, (strcmp(szImportName,pszMemberName)? szImportName : NULL), mrImportDLL,szString,GUICookie); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),") "); } // A little hack to get the formatting we need for Assem. buff = new char[SZSTRING_SIZE]; if(buff==NULL) { printError(GUICookie,"Out of memory"); g_tkMVarOwner = tkMVarOwner; return FALSE; } g_fThisIsInstanceMethod = !IsMdStatic(dwAttrs); { const char *psz = NULL; if(IsMdPrivateScope(dwAttrs)) sprintf_s(buff,SZSTRING_SIZE,"%s$PST%08X", pszMemberName,FuncToken ); else strcpy_s(buff,SZSTRING_SIZE, pszMemberName ); psz = ProperName(buff); if(psz != buff) { strcpy_s(buff,SZSTRING_SIZE,psz); } } DumpGenericPars(buff, FuncToken); //, NULL, FALSE); qbMemberSig.Shrink(0); // Get the argument names, if any strcpy_s(szArgPrefix,MAX_PREFIX_SIZE,(g_fThisIsInstanceMethod ? "A1": "A0")); { PCCOR_SIGNATURE typePtr = pComSig; unsigned ulCallConv = CorSigUncompressData(typePtr); // get the calling convention out of the way if (ulCallConv & IMAGE_CEE_CS_CALLCONV_GENERIC) CorSigUncompressData(typePtr); // get the num of generic args out of the way unsigned numArgs = CorSigUncompressData(typePtr)+1; HENUMInternal hArgEnum; mdParamDef tkArg; if (FAILED(g_pImport->EnumInit(mdtParamDef,FuncToken,&hArgEnum))) { printError(GUICookie, "Invalid MetaDataFormat"); g_tkMVarOwner = tkMVarOwner; return FALSE; } ulArgs = g_pImport->EnumGetCount(&hArgEnum); retParamIx = numArgs-1; if (ulArgs < numArgs) ulArgs = numArgs; if (ulArgs != 0) { pszArgname = new ParamDescriptor[ulArgs+2]; memset(pszArgname,0,(ulArgs+2)*sizeof(ParamDescriptor)); LPCSTR szName; ULONG ulSequence, ix; USHORT wSequence; DWORD dwAttr; ULONG j; for (j=0; g_pImport->EnumNext(&hArgEnum,&tkArg) && RidFromToken(tkArg); j++) { if (FAILED(g_pImport->GetParamDefProps(tkArg, &wSequence, &dwAttr, &szName))) { char sz[256]; sprintf_s(sz, ARRAY_SIZE(sz), RstrUTF(IDS_E_INVALIDRECORD), tkArg); printError(GUICookie, sz); continue; } ulSequence = wSequence; if (ulSequence > ulArgs+1) { char sz[256]; sprintf_s(sz,256,RstrUTF(IDS_E_PARAMSEQNO),j,ulSequence,ulSequence); printError(GUICookie,sz); } else { ix = retParamIx; if (ulSequence != 0) { ix = ulSequence-1; if (*szName != 0) { pszArgname[ix].name = new char[strlen(szName)+1]; strcpy_s(pszArgname[ix].name,strlen(szName)+1,szName); } } pszArgname[ix].attr = dwAttr; pszArgname[ix].tok = tkArg; } }// end for( along the params) for (j=0; j <numArgs; j++) { if(pszArgname[j].name == NULL) // we haven't got the name! { pszArgname[j].name = new char[16]; *pszArgname[j].name = 0; } if(*pszArgname[j].name == 0) // we haven't got the name! { sprintf_s(pszArgname[j].name,16,"A_%d",g_fThisIsInstanceMethod ? 
j+1 : j); } }// end for( along the argnames) sprintf_s(szArgPrefix,MAX_PREFIX_SIZE,"@%Id0",(size_t)pszArgname); } //end if (ulArgs) g_pImport->EnumClose(&hArgEnum); } g_tkRefUser = FuncToken; PrettyPrintMethodSig(szString, &uStringLen, &qbMemberSig, pComSig, cComSig, buff, szArgPrefix, GUICookie); g_tkRefUser = 0; szptr = &szString[strlen(szString)]; szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD((char*)0)); if(IsMiNative(dwImplAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," native"); if(IsMiIL(dwImplAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," cil"); if(IsMiOPTIL(dwImplAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," optil"); if(IsMiRuntime(dwImplAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," runtime"); if(IsMiUnmanaged(dwImplAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," unmanaged"); if(IsMiManaged(dwImplAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," managed"); if(IsMiPreserveSig(dwImplAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," preservesig"); if(IsMiForwardRef(dwImplAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," forwardref"); if(IsMiInternalCall(dwImplAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," internalcall"); if(IsMiSynchronized(dwImplAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," synchronized"); if(IsMiNoInlining(dwImplAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," noinlining"); if(IsMiAggressiveInlining(dwImplAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," aggressiveinlining"); if(IsMiNoOptimization(dwImplAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," nooptimization"); if(IsMiAggressiveOptimization(dwImplAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," aggressiveoptimization"); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD((char*)-1)); printLine(GUICookie, szString); VDELETE(buff); if(!DumpBody) { g_tkMVarOwner = tkMVarOwner; return TRUE; } if(g_fShowBytes) { if (FAILED(g_pImport->GetSigOfMethodDef(FuncToken, &cComSig, &pComSig))) { sprintf_s(szString,SZSTRING_SIZE,"%sERROR: method %08X has wrong record",g_szAsmCodeIndent,FuncToken); printError(GUICookie,ERRORMSG(szString)); return FALSE; } const char* szt = "SIG:"; for(ULONG i=0; i<cComSig;) { szptr = &szString[0]; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s// %s", g_szAsmCodeIndent, szt); while(i<cComSig) { szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," %02X",pComSig[i]); i++; if((i & 0x1F)==0) break; // print only 32 per line } printLine(GUICookie, COMMENT(szString)); szt = " "; } } szptr = &szString[0]; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s", g_szAsmCodeIndent,SCOPE()); printLine(GUICookie, szString); szptr = &szString[0]; strcat_s(g_szAsmCodeIndent,MAX_MEMBER_LENGTH," "); // We have recoreded the entry point token from the CLR Header. Check to see if this // method is the entry point. 
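// If so, emit the .entrypoint directive at the top of the method body.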
if(FuncToken == static_cast<mdToken>(dwEntryPointToken)) { sprintf_s(szString,SZSTRING_SIZE,"%s%s", g_szAsmCodeIndent,KEYWORD(".entrypoint")); printLine(GUICookie, szString); } DumpCustomAttributes(FuncToken,GUICookie); DumpGenericParsCA(FuncToken,GUICookie); DumpParams(pszArgname, retParamIx, GUICookie); DumpPermissions(FuncToken,GUICookie); // Check if the method represents entry in VTable fixups and in EATable { ULONG j; for(j=0; j<g_nVTableRef; j++) { if((*g_prVTableRef)[j].tkTok == FuncToken) { sprintf_s(szString,SZSTRING_SIZE,"%s%s %d : %d", g_szAsmCodeIndent,KEYWORD(".vtentry"),(*g_prVTableRef)[j].wEntry+1,(*g_prVTableRef)[j].wSlot+1); printLine(GUICookie, szString); break; } } for(j=0; j<g_nEATableRef; j++) { if((*g_prEATableRef)[j].tkTok == FuncToken) { szptr = &szString[0]; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s [%d] ", g_szAsmCodeIndent,KEYWORD(".export"),j+g_nEATableBase); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s %s", KEYWORD("as"), ProperName((*g_prEATableRef)[j].pszName)); printLine(GUICookie, szString); break; } } } // Dump method impls of this method: for(ULONG i = 0; i < g_NumMI; i++) { if((*g_pmi_list)[i].tkBody == FuncToken) { BOOL bOverridingTypeSpec; PrettyPrintOverrideDecl(i,szString,GUICookie,FuncToken,&bOverridingTypeSpec); printLine(GUICookie,szString); } } dwTargetRVA = dwOffset; if (IsMdPinvokeImpl(dwAttrs)) { if(dwOffset) { sprintf_s(szString,SZSTRING_SIZE,"%s// Embedded native code",g_szAsmCodeIndent); printLine(GUICookie, COMMENT(szString)); goto ItsMiNative; } if(g_szAsmCodeIndent[0]) g_szAsmCodeIndent[strlen(g_szAsmCodeIndent)-2] = 0; sprintf_s(szString,SZSTRING_SIZE,"%s%s",g_szAsmCodeIndent,UNSCOPE()); printLine(GUICookie, szString); g_tkMVarOwner = tkMVarOwner; return TRUE; } if(IsMiManaged(dwImplAttrs)) { if(IsMiIL(dwImplAttrs) || IsMiOPTIL(dwImplAttrs)) { if(g_fShowBytes) { sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_METHBEG), g_szAsmCodeIndent,dwTargetRVA); printLine(GUICookie, COMMENT(szString)); } szString[0] = 0; if (dwTargetRVA != 0) { void* newTarget = NULL; if(g_pPELoader->getVAforRVA(dwTargetRVA,&newTarget)) { DisassembleWrapper(g_pImport, (unsigned char*)newTarget, GUICookie, FuncToken,pszArgname, ulArgs); } else { sprintf_s(szString,SZSTRING_SIZE, "INVALID METHOD ADDRESS: 0x%8.8zX (RVA: 0x%8.8X)",(size_t)newTarget,dwTargetRVA); printError(GUICookie,szString); } } } else if(IsMiNative(dwImplAttrs)) { ItsMiNative: sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_DASMNATIVE), g_szAsmCodeIndent); printLine(GUICookie, COMMENT(szString)); sprintf_s(szString,SZSTRING_SIZE,"%s// Managed TargetRVA = 0x%8.8X", g_szAsmCodeIndent, dwTargetRVA); printLine(GUICookie, COMMENT(szString)); } } else if(IsMiUnmanaged(dwImplAttrs)&&IsMiNative(dwImplAttrs)) { _ASSERTE(IsMiNative(dwImplAttrs)); sprintf_s(szString,SZSTRING_SIZE,"%s// Unmanaged TargetRVA = 0x%8.8X", g_szAsmCodeIndent, dwTargetRVA); printLine(GUICookie, COMMENT(szString)); } else if(IsMiRuntime(dwImplAttrs)) { sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_METHODRT), g_szAsmCodeIndent); printLine(GUICookie, COMMENT(szString)); } #ifdef _DEBUG else _ASSERTE(!"Bad dwImplAttrs"); #endif if(g_szAsmCodeIndent[0]) g_szAsmCodeIndent[strlen(g_szAsmCodeIndent)-2] = 0; { szptr = &szString[0]; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,UNSCOPE()); if(pszClassName) { szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("// end of method %s::"), ProperName((char*)pszClassName)); 
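// Append the method name to complete the "// end of method Class::Name" trailer comment.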
strcpy_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT(ProperName((char*)pszMemberName))); } else sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("// end of global method %s"), ProperName((char*)pszMemberName)); } printLine(GUICookie, szString); szString[0] = 0; printLine(GUICookie, szString); if(pszArgname) { for(ULONG i=0; i < ulArgs; i++) { if(pszArgname[i].name) VDELETE(pszArgname[i].name); } VDELETE(pszArgname); } g_tkMVarOwner = tkMVarOwner; return TRUE; } #ifdef _PREFAST_ #pragma warning(pop) #endif BOOL DumpField(mdToken FuncToken, const char *pszClassName,void *GUICookie, BOOL DumpBody) { char *pszMemberName = NULL;//[MAX_MEMBER_LENGTH]; DWORD dwAttrs = 0; CQuickBytes qbMemberSig; PCCOR_SIGNATURE pComSig = NULL; ULONG cComSig; const char *szStr = NULL;//[1024]; char* szptr; const char *psz; if (FAILED(g_pImport->GetNameOfFieldDef(FuncToken, &psz))) { char sz[2048]; sprintf_s(sz, 2048, "%sERROR: FieldDef %08X has no signature", g_szAsmCodeIndent, FuncToken); printError(GUICookie, sz); return FALSE; } MAKE_NAME_IF_NONE(psz,FuncToken); if (FAILED(g_pImport->GetFieldDefProps(FuncToken, &dwAttrs))) { char sz[2048]; sprintf_s(sz, 2048, "%sERROR: FieldDef %08X record error", g_szAsmCodeIndent, FuncToken); printError(GUICookie, sz); return FALSE; } if (g_fLimitedVisibility) { if(g_fHidePub && IsFdPublic(dwAttrs)) return FALSE; if(g_fHidePriv && IsFdPrivate(dwAttrs)) return FALSE; if(g_fHideFam && IsFdFamily(dwAttrs)) return FALSE; if(g_fHideAsm && IsFdAssembly(dwAttrs)) return FALSE; if(g_fHideFOA && IsFdFamORAssem(dwAttrs)) return FALSE; if(g_fHideFAA && IsFdFamANDAssem(dwAttrs)) return FALSE; if(g_fHidePrivScope && IsFdPrivateScope(dwAttrs)) return FALSE; } { const char* psz1 = NULL; if(IsFdPrivateScope(dwAttrs)) { pszMemberName = new char[strlen(psz)+15]; sprintf_s(pszMemberName,strlen(psz)+15,"%s$PST%08X", psz,FuncToken ); } else { pszMemberName = new char[strlen(psz)+3]; strcpy_s(pszMemberName, strlen(psz)+3, psz ); } psz1 = ProperName(pszMemberName); VDELETE(pszMemberName); pszMemberName = new char[strlen(psz1)+1]; strcpy_s(pszMemberName,strlen(psz1)+1,psz1); } if (FAILED(g_pImport->GetSigOfFieldDef(FuncToken, &cComSig, &pComSig))) { pComSig = NULL; } if (cComSig == NULL) { char sz[2048]; sprintf_s(sz,2048,"%sERROR: field '%s' has no signature",g_szAsmCodeIndent,pszMemberName); VDELETE(pszMemberName); printError(GUICookie,sz); return FALSE; } g_tkRefUser = FuncToken; bool bRet = FALSE; PAL_CPP_TRY { szStr = PrettyPrintSig(pComSig, cComSig, (DumpBody ? 
pszMemberName : ""), &qbMemberSig, g_pImport,NULL); } PAL_CPP_CATCH_ALL { printError(GUICookie,"INVALID ADDRESS IN FIELD SIGNATURE"); bRet = TRUE; } PAL_CPP_ENDTRY; if (bRet) return FALSE; g_tkRefUser = 0; if (g_Mode == MODE_DUMP_CLASS_METHOD || g_Mode == MODE_DUMP_CLASS_METHOD_SIG) { if (strcmp(pszMemberName, g_pszMethodToDump) != 0) { VDELETE(pszMemberName); return FALSE; } if (g_Mode == MODE_DUMP_CLASS_METHOD_SIG) { // we want plain signature without token values and without the field name BOOL fDumpTokens = g_fDumpTokens; g_fDumpTokens = FALSE; const char *pszPlainSig; PAL_CPP_TRY { CQuickBytes qbTempSig; pszPlainSig = PrettyPrintSig(pComSig, cComSig, "", &qbTempSig, g_pImport, NULL); } PAL_CPP_CATCH_ALL { pszPlainSig = ""; } PAL_CPP_ENDTRY; g_fDumpTokens = fDumpTokens; if (strcmp(pszPlainSig, g_pszSigToDump) != 0) { VDELETE(pszMemberName); return FALSE; } } } VDELETE(pszMemberName); szptr = &szString[0]; if(DumpBody) { szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ", g_szAsmCodeIndent,ANCHORPT(KEYWORD(".field"),FuncToken)); if(g_fDumpTokens) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("/*%08X*/ "),FuncToken); } // put offset (if any) for(ULONG i=0; i < g_cFieldOffsets; i++) { if(g_rFieldOffset[i].ridOfField == FuncToken) { if(g_rFieldOffset[i].ulOffset != 0xFFFFFFFF) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"[%d] ",g_rFieldOffset[i].ulOffset); break; } } szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD((char*)0)); if(IsFdPublic(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"public "); if(IsFdPrivate(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"private "); if(IsFdStatic(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"static "); if(IsFdFamily(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"family "); if(IsFdAssembly(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"assembly "); if(IsFdFamANDAssem(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"famandassem "); if(IsFdFamORAssem(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"famorassem "); if(IsFdPrivateScope(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"privatescope "); if(IsFdInitOnly(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"initonly "); if(IsFdLiteral(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"literal "); if(IsFdNotSerialized(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"notserialized "); if(IsFdSpecialName(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"specialname "); if(IsFdRTSpecialName(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"rtspecialname "); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD((char*)-1)); if (IsFdPinvokeImpl(dwAttrs)) { DWORD dwMappingFlags; const char *szImportName; mdModuleRef mrImportDLL; szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s(",KEYWORD("pinvokeimpl")); if(FAILED(g_pImport->GetPinvokeMap(FuncToken,&dwMappingFlags, &szImportName,&mrImportDLL))) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("/* No map */")); else szptr = DumpPinvokeMap(dwMappingFlags, (strcmp(szImportName,psz)? 
szImportName : NULL), mrImportDLL, szString,GUICookie); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),") "); } szptr = DumpMarshaling(g_pImport,szString,SZSTRING_SIZE,FuncToken); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s",szStr); if (IsFdHasFieldRVA(dwAttrs)) // Do we have an RVA associated with this? { szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr), KEYWORD(" at ")); ULONG fieldRVA; if (SUCCEEDED(g_pImport->GetFieldRVA(FuncToken, &fieldRVA))) { szptr = DumpDataPtr(&szString[strlen(szString)], fieldRVA, SizeOfField(FuncToken,g_pImport)); } else { szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),RstrUTF(IDS_E_NORVA)); } } // dump default value (if any): if(IsFdHasDefault(dwAttrs) && DumpBody) DumpDefaultValue(FuncToken,szString,GUICookie); printLine(GUICookie, szString); if(DumpBody) { DumpCustomAttributes(FuncToken,GUICookie); DumpPermissions(FuncToken,GUICookie); } return TRUE; } BOOL DumpEvent(mdToken FuncToken, const char *pszClassName, DWORD dwClassAttrs, void *GUICookie, BOOL DumpBody) { DWORD dwAttrs; mdToken tkEventType; LPCSTR psz; HENUMInternal hAssoc; ASSOCIATE_RECORD rAssoc[128]; CQuickBytes qbMemberSig; ULONG nAssoc; char* szptr; if (FAILED(g_pImport->GetEventProps(FuncToken,&psz,&dwAttrs,&tkEventType))) { char sz[2048]; sprintf_s(sz, 2048, "%sERROR: Invalid Event %08X record", g_szAsmCodeIndent, FuncToken); printError(GUICookie, sz); return FALSE; } MAKE_NAME_IF_NONE(psz,FuncToken); if (g_Mode == MODE_DUMP_CLASS_METHOD || g_Mode == MODE_DUMP_CLASS_METHOD_SIG) { if (strcmp(psz, g_pszMethodToDump) != 0) return FALSE; } if (FAILED(g_pImport->EnumAssociateInit(FuncToken,&hAssoc))) { char sz[2048]; sprintf_s(sz, 2048, "%sERROR: MetaData error enumerating Associate for %08X", g_szAsmCodeIndent, FuncToken); printError(GUICookie, sz); return FALSE; } if ((nAssoc = hAssoc.m_ulCount)) { memset(rAssoc,0,sizeof(rAssoc)); if (FAILED(g_pImport->GetAllAssociates(&hAssoc,rAssoc,nAssoc))) { char sz[2048]; sprintf_s(sz, 2048, "%sERROR: MetaData error enumerating all Associates", g_szAsmCodeIndent); printError(GUICookie, sz); return FALSE; } if (g_fLimitedVisibility) { unsigned i; for (i=0; i < nAssoc;i++) { if ((TypeFromToken(rAssoc[i].m_memberdef) == mdtMethodDef) && g_pImport->IsValidToken(rAssoc[i].m_memberdef)) { DWORD dwMethodAttrs; if (FAILED(g_pImport->GetMethodDefProps(rAssoc[i].m_memberdef, &dwMethodAttrs))) { continue; } if(g_fHidePub && IsMdPublic(dwMethodAttrs)) continue; if(g_fHidePriv && IsMdPrivate(dwMethodAttrs)) continue; if(g_fHideFam && IsMdFamily(dwMethodAttrs)) continue; if(g_fHideAsm && IsMdAssem(dwMethodAttrs)) continue; if(g_fHideFOA && IsMdFamORAssem(dwMethodAttrs)) continue; if(g_fHideFAA && IsMdFamANDAssem(dwMethodAttrs)) continue; if(g_fHidePrivScope && IsMdPrivateScope(dwMethodAttrs)) continue; break; } } if (i >= nAssoc) return FALSE; } } szptr = &szString[0]; if (DumpBody) { szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ", g_szAsmCodeIndent,KEYWORD(".event")); if(g_fDumpTokens) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("/*%08X*/ "),FuncToken); } else { szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s : ",ProperName((char*)psz)); } if(IsEvSpecialName(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("specialname ")); if(IsEvRTSpecialName(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("rtspecialname ")); if(RidFromToken(tkEventType)&&g_pImport->IsValidToken(tkEventType)) { switch(TypeFromToken(tkEventType)) { case mdtTypeRef: case mdtTypeDef: case mdtTypeSpec: { 
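// Print the event's type (typically a delegate type).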
PrettyPrintToken(szString, tkEventType, g_pImport,GUICookie,0); szptr = &szString[strlen(szString)]; } break; default: break; } } if(!DumpBody) { printLine(GUICookie,szString); return TRUE; } szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," %s", ProperName((char*)psz)); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"%s%s",g_szAsmCodeIndent,SCOPE()); printLine(GUICookie,szString); strcat_s(g_szAsmCodeIndent,MAX_MEMBER_LENGTH," "); DumpCustomAttributes(FuncToken,GUICookie); DumpPermissions(FuncToken,GUICookie); if(nAssoc) { for(unsigned i=0; i < nAssoc;i++) { mdToken tk = rAssoc[i].m_memberdef; DWORD sem = rAssoc[i].m_dwSemantics; szptr = &szString[0]; if(IsMsAddOn(sem)) szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,KEYWORD(".addon")); else if(IsMsRemoveOn(sem)) szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,KEYWORD(".removeon")); else if(IsMsFire(sem)) szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,KEYWORD(".fire")); else if(IsMsOther(sem)) szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,KEYWORD(".other")); else szptr+=sprintf_s(szptr,SZSTRING_SIZE,ERRORMSG("UNKNOWN SEMANTICS: 0x%X "),sem); if(g_pImport->IsValidToken(tk)) PrettyPrintToken(szString, tk, g_pImport,GUICookie,0); else szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),ERRORMSG("INVALID TOKEN 0x%8.8X"),tk); printLine(GUICookie,szString); } } if(g_szAsmCodeIndent[0]) g_szAsmCodeIndent[strlen(g_szAsmCodeIndent)-2] = 0; szptr = &szString[0]; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,UNSCOPE()); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("// end of event %s::"),ProperName((char*)pszClassName)); strcpy_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT(ProperName((char*)psz))); printLine(GUICookie,szString); return TRUE; } BOOL DumpProp(mdToken FuncToken, const char *pszClassName, DWORD dwClassAttrs, void *GUICookie, BOOL DumpBody) { DWORD dwAttrs; LPCSTR psz; HENUMInternal hAssoc; ASSOCIATE_RECORD rAssoc[128]; CQuickBytes qbMemberSig; PCCOR_SIGNATURE pComSig; ULONG cComSig, nAssoc; unsigned uStringLen = SZSTRING_SIZE; char* szptr; if (FAILED(g_pImport->GetPropertyProps(FuncToken,&psz,&dwAttrs,&pComSig,&cComSig))) { char sz[2048]; sprintf_s(sz, 2048, "%sERROR: Invalid Property %08X record", g_szAsmCodeIndent, FuncToken); printError(GUICookie, sz); return FALSE; } MAKE_NAME_IF_NONE(psz,FuncToken); if(cComSig == 0) { char sz[2048]; sprintf_s(sz,2048,"%sERROR: property '%s' has no signature",g_szAsmCodeIndent,psz); printError(GUICookie,sz); return FALSE; } if (g_Mode == MODE_DUMP_CLASS_METHOD || g_Mode == MODE_DUMP_CLASS_METHOD_SIG) { if (strcmp(psz, g_pszMethodToDump) != 0) return FALSE; } if (FAILED(g_pImport->EnumAssociateInit(FuncToken,&hAssoc))) { char sz[2048]; sprintf_s(sz, 2048, "%sERROR: MetaData error enumerating Associate for %08X", g_szAsmCodeIndent, FuncToken); printError(GUICookie, sz); return FALSE; } if ((nAssoc = hAssoc.m_ulCount) != 0) { memset(rAssoc,0,sizeof(rAssoc)); if (FAILED(g_pImport->GetAllAssociates(&hAssoc,rAssoc,nAssoc))) { char sz[2048]; sprintf_s(sz, 2048, "%sERROR: MetaData error enumerating all Associates", g_szAsmCodeIndent); printError(GUICookie, sz); return FALSE; } if (g_fLimitedVisibility) { unsigned i; for (i=0; i < nAssoc;i++) { if ((TypeFromToken(rAssoc[i].m_memberdef) == mdtMethodDef) && g_pImport->IsValidToken(rAssoc[i].m_memberdef)) { DWORD dwMethodAttrs; if (FAILED(g_pImport->GetMethodDefProps(rAssoc[i].m_memberdef, &dwMethodAttrs))) { continue; } if(g_fHidePub && 
IsMdPublic(dwMethodAttrs)) continue; if(g_fHidePriv && IsMdPrivate(dwMethodAttrs)) continue; if(g_fHideFam && IsMdFamily(dwMethodAttrs)) continue; if(g_fHideAsm && IsMdAssem(dwMethodAttrs)) continue; if(g_fHideFOA && IsMdFamORAssem(dwMethodAttrs)) continue; if(g_fHideFAA && IsMdFamANDAssem(dwMethodAttrs)) continue; if(g_fHidePrivScope && IsMdPrivateScope(dwMethodAttrs)) continue; break; } } if( i >= nAssoc) return FALSE; } } szptr = &szString[0]; if (DumpBody) { szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ", g_szAsmCodeIndent,KEYWORD(".property")); if(g_fDumpTokens) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("/*%08X*/ "),FuncToken); } else { szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s : ",ProperName((char*)psz)); } if(IsPrSpecialName(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("specialname ")); if(IsPrRTSpecialName(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("rtspecialname ")); { char pchDefault[] = ""; char *pch = pchDefault; if(DumpBody) { pch = szptr+1; strcpy_s(pch,SZSTRING_REMAINING_SIZE(pch),ProperName((char*)psz)); } qbMemberSig.Shrink(0); PrettyPrintMethodSig(szString, &uStringLen, &qbMemberSig, pComSig, cComSig, pch, NULL, GUICookie); if(IsPrHasDefault(dwAttrs) && DumpBody) DumpDefaultValue(FuncToken,szString,GUICookie); } printLine(GUICookie,szString); if(DumpBody) { sprintf_s(szString,SZSTRING_SIZE,"%s%s",g_szAsmCodeIndent,SCOPE()); printLine(GUICookie,szString); strcat_s(g_szAsmCodeIndent,MAX_MEMBER_LENGTH," "); DumpCustomAttributes(FuncToken,GUICookie); DumpPermissions(FuncToken,GUICookie); if(nAssoc) { for(unsigned i=0; i < nAssoc;i++) { mdToken tk = rAssoc[i].m_memberdef; DWORD sem = rAssoc[i].m_dwSemantics; szptr = &szString[0]; if(IsMsSetter(sem)) szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,KEYWORD(".set")); else if(IsMsGetter(sem)) szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,KEYWORD(".get")); else if(IsMsOther(sem)) szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,KEYWORD(".other")); else szptr+=sprintf_s(szptr,SZSTRING_SIZE,ERRORMSG("UNKNOWN SEMANTICS: 0x%X "),sem); if(g_pImport->IsValidToken(tk)) PrettyPrintToken(szString, tk, g_pImport,GUICookie,0); else szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),ERRORMSG("INVALID TOKEN 0x%8.8X"),tk); printLine(GUICookie,szString); } } if(g_szAsmCodeIndent[0]) g_szAsmCodeIndent[strlen(g_szAsmCodeIndent)-2] = 0; szptr = &szString[0]; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,UNSCOPE()); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("// end of property %s::"),ProperName((char*)pszClassName)); strcpy_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT(ProperName((char*)psz))); printLine(GUICookie,szString); } // end if(DumpBody) return TRUE; } BOOL DumpMembers(mdTypeDef cl, const char *pszClassNamespace, const char *pszClassName, DWORD dwClassAttrs, DWORD dwEntryPointToken, void* GUICookie) { HRESULT hr; mdToken *pMemberList = NULL; DWORD NumMembers, NumFields,NumMethods,NumEvents,NumProps; DWORD i; HENUMInternal hEnumMethod; HENUMInternal hEnumField; HENUMInternal hEnumEvent; HENUMInternal hEnumProp; CQuickBytes qbMemberSig; BOOL ret; // Get the total count of methods + fields hr = g_pImport->EnumInit(mdtMethodDef, cl, &hEnumMethod); if (FAILED(hr)) { FailedToEnum: printLine(GUICookie,RstrUTF(IDS_E_MEMBRENUM)); ret = FALSE; goto CloseHandlesAndReturn; } NumMembers = NumMethods = g_pImport->EnumGetCount(&hEnumMethod); if (FAILED(g_pImport->EnumInit(mdtFieldDef, cl, 
&hEnumField))) goto FailedToEnum; NumFields = g_pImport->EnumGetCount(&hEnumField); NumMembers += NumFields; if (FAILED(g_pImport->EnumInit(mdtEvent, cl, &hEnumEvent))) goto FailedToEnum; NumEvents = g_pImport->EnumGetCount(&hEnumEvent); NumMembers += NumEvents; if (FAILED(g_pImport->EnumInit(mdtProperty, cl, &hEnumProp))) goto FailedToEnum; NumProps = g_pImport->EnumGetCount(&hEnumProp); NumMembers += NumProps; ret = TRUE; if(NumMembers) { pMemberList = new (nothrow) mdToken[NumMembers]; if(pMemberList == NULL) ret = FALSE; } if ((NumMembers == 0)||(pMemberList == NULL)) goto CloseHandlesAndReturn; for (i = 0; g_pImport->EnumNext(&hEnumField, &pMemberList[i]); i++); for (; g_pImport->EnumNext(&hEnumMethod, &pMemberList[i]); i++); for (; g_pImport->EnumNext(&hEnumEvent, &pMemberList[i]); i++); for (; g_pImport->EnumNext(&hEnumProp, &pMemberList[i]); i++); _ASSERTE(i == NumMembers); for (i = 0; i < NumMembers; i++) { mdToken tk = pMemberList[i]; if(g_pImport->IsValidToken(tk)) { switch (TypeFromToken(tk)) { case mdtFieldDef: ret = DumpField(pMemberList[i], pszClassName, GUICookie,TRUE); break; case mdtMethodDef: ret = DumpMethod(pMemberList[i], pszClassName, dwEntryPointToken,GUICookie,TRUE); break; case mdtEvent: ret = DumpEvent(pMemberList[i], pszClassName, dwClassAttrs,GUICookie,TRUE); break; case mdtProperty: ret = DumpProp(pMemberList[i], pszClassName, dwClassAttrs,GUICookie,TRUE); break; default: { char szStr[4096]; sprintf_s(szStr,4096,RstrUTF(IDS_E_ODDMEMBER),pMemberList[i],pszClassName); printLine(GUICookie,szStr); } ret = FALSE; break; } // end switch } else { char szStr[256]; sprintf_s(szStr,256,ERRORMSG("INVALID MEMBER TOKEN: 0x%8.8X"),tk); printLine(GUICookie,szStr); ret= FALSE; } if(ret && (g_Mode == MODE_DUMP_CLASS_METHOD_SIG)) break; } // end for ret = TRUE; CloseHandlesAndReturn: g_pImport->EnumClose(&hEnumMethod); g_pImport->EnumClose(&hEnumField); g_pImport->EnumClose(&hEnumEvent); g_pImport->EnumClose(&hEnumProp); if(pMemberList) delete[] pMemberList; return ret; } BOOL GetClassLayout(mdTypeDef cl, ULONG* pulPackSize, ULONG* pulClassSize) { // Dump class layout HENUMInternal hEnumField; BOOL ret = FALSE; if(g_rFieldOffset) VDELETE(g_rFieldOffset); g_cFieldOffsets = 0; g_cFieldsMax = 0; if(RidFromToken(cl)==0) return TRUE; if (SUCCEEDED(g_pImport->EnumInit(mdtFieldDef, cl, &hEnumField))) { g_cFieldsMax = g_pImport->EnumGetCount(&hEnumField); g_pImport->EnumClose(&hEnumField); } if(SUCCEEDED(g_pImport->GetClassPackSize(cl,pulPackSize))) ret = TRUE; else *pulPackSize = 0xFFFFFFFF; if(SUCCEEDED(g_pImport->GetClassTotalSize(cl,pulClassSize))) ret = TRUE; else *pulClassSize = 0xFFFFFFFF; if(g_cFieldsMax) { MD_CLASS_LAYOUT Layout; if(SUCCEEDED(g_pImport->GetClassLayoutInit(cl,&Layout))) { g_rFieldOffset = new COR_FIELD_OFFSET[g_cFieldsMax+1]; if(g_rFieldOffset) { COR_FIELD_OFFSET* pFO = g_rFieldOffset; for(g_cFieldOffsets=0; SUCCEEDED(g_pImport->GetClassLayoutNext(&Layout,&(pFO->ridOfField),(ULONG*)&(pFO->ulOffset))) &&RidFromToken(pFO->ridOfField); g_cFieldOffsets++, pFO++) ret = TRUE; } } } return ret; } BOOL IsANestedInB(mdTypeDef A, mdTypeDef B) { DWORD i; for(i = 0; i < g_NumClasses; i++) { if(g_cl_list[i] == A) { A = g_cl_enclosing[i]; if(A == B) return TRUE; if(A == mdTypeDefNil) return FALSE; return IsANestedInB(A,B); } } return FALSE; } mdTypeDef TopEncloser(mdTypeDef A) { DWORD i; for(i = 0; i < g_NumClasses; i++) { if(g_cl_list[i] == A) { if(g_cl_enclosing[i] == mdTypeDefNil) return A; return TopEncloser(g_cl_enclosing[i]); } } return A; } BOOL DumpClass(mdTypeDef 
cl, DWORD dwEntryPointToken, void* GUICookie, ULONG WhatToDump) // WhatToDump: 0-title,flags,extends,implements; // +1-pack,size and custom attrs; // +2-nested classes // +4-members { char *pszClassName; // name associated with this CL char *pszNamespace; const char *pc1,*pc2; DWORD dwClassAttrs; mdTypeRef crExtends; HRESULT hr; mdInterfaceImpl ii; DWORD NumInterfaces; DWORD i; HENUMInternal hEnumII; // enumerator for interface impl //char *szString; char* szptr; mdToken tkVarOwner = g_tkVarOwner; ULONG WhatToDumpOrig = WhatToDump; if (FAILED(g_pImport->GetNameOfTypeDef( cl, &pc1, //&pszClassName, &pc2))) //&pszNamespace { char sz[2048]; sprintf_s(sz, 2048, RstrUTF(IDS_E_INVALIDRECORD), cl); printError(GUICookie, sz); g_tkVarOwner = tkVarOwner; return FALSE; } MAKE_NAME_IF_NONE(pc1,cl); if (g_Mode == MODE_DUMP_CLASS || g_Mode == MODE_DUMP_CLASS_METHOD || g_Mode == MODE_DUMP_CLASS_METHOD_SIG) { if(cl != g_tkClassToDump) { if(IsANestedInB(g_tkClassToDump,cl)) WhatToDump = 2; // nested classes only else return TRUE; } } if (FAILED(g_pImport->GetTypeDefProps( cl, &dwClassAttrs, &crExtends))) { char sz[2048]; sprintf_s(sz, 2048, RstrUTF(IDS_E_INVALIDRECORD), cl); printError(GUICookie, sz); g_tkVarOwner = tkVarOwner; return FALSE; } if(g_fLimitedVisibility) { if(g_fHidePub && (IsTdPublic(dwClassAttrs)||IsTdNestedPublic(dwClassAttrs))) return FALSE; if(g_fHidePriv && (IsTdNotPublic(dwClassAttrs)||IsTdNestedPrivate(dwClassAttrs))) return FALSE; if(g_fHideFam && IsTdNestedFamily(dwClassAttrs)) return FALSE; if(g_fHideAsm && IsTdNestedAssembly(dwClassAttrs)) return FALSE; if(g_fHideFOA && IsTdNestedFamORAssem(dwClassAttrs)) return FALSE; if(g_fHideFAA && IsTdNestedFamANDAssem(dwClassAttrs)) return FALSE; } g_tkVarOwner = cl; pszClassName = (char*)(pc1 ? pc1 : ""); pszNamespace = (char*)(pc2 ? 
pc2 : ""); szptr = &szString[0]; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,KEYWORD(".class")); if(g_fDumpTokens) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("/*%8.8X*/ "),cl); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD((char*)0)); if (IsTdInterface(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"interface "); if (IsTdPublic(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"public "); if (IsTdNotPublic(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"private "); if (IsTdAbstract(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"abstract "); if (IsTdAutoLayout(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"auto "); if (IsTdSequentialLayout(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"sequential "); if (IsTdExplicitLayout(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"explicit "); if (IsTdAnsiClass(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"ansi "); if (IsTdUnicodeClass(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"unicode "); if (IsTdAutoClass(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"autochar "); if (IsTdImport(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"import "); if (IsTdWindowsRuntime(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"windowsruntime "); if (IsTdSerializable(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"serializable "); if (IsTdSealed(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"sealed "); if (IsTdNestedPublic(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"nested public "); if (IsTdNestedPrivate(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"nested private "); if (IsTdNestedFamily(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"nested family "); if (IsTdNestedAssembly(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"nested assembly "); if (IsTdNestedFamANDAssem(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"nested famandassem "); if (IsTdNestedFamORAssem(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"nested famorassem "); if (IsTdBeforeFieldInit(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"beforefieldinit "); if (IsTdSpecialName(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"specialname "); if (IsTdRTSpecialName(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"rtspecialname "); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD((char*)-1)); if(*pszNamespace != 0) szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s.",ProperName(pszNamespace)); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),WhatToDump > 2 ? 
ANCHORPT(ProperName(pszClassName),cl) : JUMPPT(ProperName(pszClassName),cl)); szptr = DumpGenericPars(szString, cl, GUICookie,TRUE); if (szptr == NULL) { g_tkVarOwner = tkVarOwner; return FALSE; } printLine(GUICookie,szString); if (!IsNilToken(crExtends)) { CQuickBytes out; szptr = szString; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s %s ",g_szAsmCodeIndent,KEYWORD("extends")); if(g_pImport->IsValidToken(crExtends)) PrettyPrintToken(szString, crExtends, g_pImport,GUICookie,cl); else szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),ERRORMSG("INVALID TOKEN: 0x%8.8X"),crExtends); printLine(GUICookie,szString); } hr = g_pImport->EnumInit( mdtInterfaceImpl, cl, &hEnumII); if (FAILED(hr)) { printError(GUICookie,RstrUTF(IDS_E_ENUMINIT)); g_tkVarOwner = tkVarOwner; return FALSE; } NumInterfaces = g_pImport->EnumGetCount(&hEnumII); if (NumInterfaces > 0) { CQuickBytes out; mdTypeRef crInterface; for (i=0; g_pImport->EnumNext(&hEnumII, &ii); i++) { szptr = szString; if(i) szptr+=sprintf_s(szptr,SZSTRING_SIZE, "%s ",g_szAsmCodeIndent); else szptr+=sprintf_s(szptr,SZSTRING_SIZE, "%s %s ",g_szAsmCodeIndent,KEYWORD("implements")); if (FAILED(g_pImport->GetTypeOfInterfaceImpl(ii, &crInterface))) { char sz[2048]; sprintf_s(sz, 2048, RstrUTF(IDS_E_INVALIDRECORD), ii); printError(GUICookie, sz); g_tkVarOwner = tkVarOwner; return FALSE; } if(g_pImport->IsValidToken(crInterface)) PrettyPrintToken(szString, crInterface, g_pImport,GUICookie,cl); else szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),ERRORMSG("INVALID TOKEN: 0x%8.8X"),crInterface); if(i < NumInterfaces-1) strcat_s(szString, SZSTRING_SIZE,","); printLine(GUICookie,szString); out.Shrink(0); } // The assertion will fire if the enumerator is bad _ASSERTE(NumInterfaces == i); g_pImport->EnumClose(&hEnumII); } if(WhatToDump == 0) // 0 = title only { sprintf_s(szString,SZSTRING_SIZE,"%s%s %s",g_szAsmCodeIndent,SCOPE(),UNSCOPE()); printLine(GUICookie,szString); g_tkVarOwner = tkVarOwner; return TRUE; } sprintf_s(szString,SZSTRING_SIZE,"%s%s",g_szAsmCodeIndent,SCOPE()); printLine(GUICookie,szString); strcat_s(g_szAsmCodeIndent,MAX_MEMBER_LENGTH," "); ULONG ulPackSize=0xFFFFFFFF,ulClassSize=0xFFFFFFFF; if(WhatToDump & 1) { if(GetClassLayout(cl,&ulPackSize,&ulClassSize)) { // Dump class layout if(ulPackSize != 0xFFFFFFFF) { sprintf_s(szString,SZSTRING_SIZE,"%s%s %d",g_szAsmCodeIndent,KEYWORD(".pack"),ulPackSize); printLine(GUICookie,szString); } if(ulClassSize != 0xFFFFFFFF) { sprintf_s(szString,SZSTRING_SIZE,"%s%s %d",g_szAsmCodeIndent,KEYWORD(".size"),ulClassSize); printLine(GUICookie,szString); } } DumpCustomAttributes(cl,GUICookie); // Dev11 #10745 // Dump InterfaceImpl custom attributes here if (NumInterfaces > 0 && g_fShowCA) { hr = g_pImport->EnumInit( mdtInterfaceImpl, cl, &hEnumII); if (FAILED(hr)) { printError(GUICookie,RstrUTF(IDS_E_ENUMINIT)); g_tkVarOwner = tkVarOwner; return FALSE; } ASSERT_AND_CHECK(NumInterfaces == g_pImport->EnumGetCount(&hEnumII)); CQuickBytes out; mdTypeRef crInterface; for (i = 0; g_pImport->EnumNext(&hEnumII, &ii); i++) { HENUMInternal hEnum; mdCustomAttribute tkCA; bool fFirst = true; if (FAILED(g_pImport->EnumInit(mdtCustomAttribute, ii,&hEnum))) { return FALSE; } while(g_pImport->EnumNext(&hEnum,&tkCA) && RidFromToken(tkCA)) { if (fFirst) { // Print .interfaceImpl type {type} before the custom attribute list szptr = szString; szptr += sprintf_s(szptr, SZSTRING_SIZE, "%s.%s ", g_szAsmCodeIndent, KEYWORD("interfaceimpl type")); if (FAILED(g_pImport->GetTypeOfInterfaceImpl(ii, &crInterface))) { char sz[2048]; 
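// Invalid InterfaceImpl record: report it, restore the saved generic-parameter owner, and abandon the class dump.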
sprintf_s(sz, 2048, RstrUTF(IDS_E_INVALIDRECORD), ii); printError(GUICookie, sz); g_tkVarOwner = tkVarOwner; return FALSE; } if(g_pImport->IsValidToken(crInterface)) PrettyPrintToken(szString, crInterface, g_pImport,GUICookie,cl); else szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),ERRORMSG("INVALID TOKEN: 0x%8.8X"),crInterface); printLine(GUICookie,szString); out.Shrink(0); szptr = szString; fFirst = false; } DumpCustomAttribute(tkCA,GUICookie,false); } g_pImport->EnumClose( &hEnum); } // The assertion will fire if the enumerator is bad _ASSERTE(NumInterfaces == i); g_pImport->EnumClose(&hEnumII); } DumpGenericParsCA(cl,GUICookie); DumpPermissions(cl,GUICookie); } // Dump method impls declared in this class whose implementing methods belong somewhere else: if(WhatToDump & 1) // 1 - dump headers { for(i = 0; i < g_NumMI; i++) { if(((*g_pmi_list)[i].tkClass == cl)&&((*g_pmi_list)[i].tkBodyParent != cl)) { BOOL bOverridingTypeSpec; PrettyPrintOverrideDecl(i,szString,GUICookie,cl,&bOverridingTypeSpec); strcat_s(szString, SZSTRING_SIZE,KEYWORD(" with ")); if (bOverridingTypeSpec) { // If PrettyPrintOverrideDecl printed the 'method' keyword, we need it here as well // to satisfy the following grammar rule (simplified): // _OVERRIDE METHOD_ ... DCOLON methodName ... WITH_ METHOD_ ... DCOLON methodName ... strcat_s(szString, SZSTRING_SIZE,KEYWORD("method ")); } PrettyPrintToken(szString, (*g_pmi_list)[i].tkBody, g_pImport,GUICookie,0); printLine(GUICookie,szString); } } } if(WhatToDump & 2) // nested classes { BOOL fRegetClassLayout=FALSE; DWORD dwMode = g_Mode; if(g_Mode == MODE_DUMP_CLASS) g_Mode = MODE_DUMP_ALL; for(i = 0; i < g_NumClasses; i++) { if(g_cl_enclosing[i] == cl) { DumpClass(g_cl_list[i],dwEntryPointToken,GUICookie,WhatToDumpOrig); fRegetClassLayout = TRUE; } } if(fRegetClassLayout) GetClassLayout(cl,&ulPackSize,&ulClassSize); g_Mode = dwMode; } if(WhatToDump & 4) { DumpMembers(cl, pszNamespace, pszClassName, dwClassAttrs, dwEntryPointToken,GUICookie); } if(g_szAsmCodeIndent[0]) g_szAsmCodeIndent[strlen(g_szAsmCodeIndent)-2] = 0; szptr = szString; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s %s// end of class ",g_szAsmCodeIndent,UNSCOPE(),COMMENT((char*)0)); if(*pszNamespace != 0) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s.",ProperName(pszNamespace)); sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s%s", ProperName(pszClassName),COMMENT((char*)-1)); printLine(GUICookie,szString); printLine(GUICookie,""); g_tkVarOwner = tkVarOwner; return TRUE; } void DumpGlobalMethods(DWORD dwEntryPointToken) { HENUMInternal hEnumMethod; mdToken FuncToken; DWORD i; CQuickBytes qbMemberSig; if (FAILED(g_pImport->EnumGlobalFunctionsInit(&hEnumMethod))) return; for (i = 0; g_pImport->EnumNext(&hEnumMethod, &FuncToken); i++) { if (i == 0) { printLine(g_pFile,""); printLine(g_pFile,COMMENT("// ================== GLOBAL METHODS =========================")); printLine(g_pFile,""); } if(DumpMethod(FuncToken, NULL, dwEntryPointToken, g_pFile, TRUE)&& (g_Mode == MODE_DUMP_CLASS_METHOD || g_Mode == MODE_DUMP_CLASS_METHOD_SIG)) break; } g_pImport->EnumClose(&hEnumMethod); if(i) { printLine(g_pFile,""); printLine(g_pFile,COMMENT("// =============================================================")); printLine(g_pFile,""); } } void DumpGlobalFields() { HENUMInternal hEnum; mdToken FieldToken; DWORD i; CQuickBytes qbMemberSig; if (FAILED(g_pImport->EnumGlobalFieldsInit(&hEnum))) return; for (i = 0; g_pImport->EnumNext(&hEnum, &FieldToken); i++) { if (i == 0) { printLine(g_pFile,""); 
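// The "GLOBAL FIELDS" banner below is emitted only once, before the first field returned by the enumerator.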
printLine(g_pFile,COMMENT("// ================== GLOBAL FIELDS ==========================")); printLine(g_pFile,""); } if(DumpField(FieldToken, NULL, g_pFile, TRUE)&& (g_Mode == MODE_DUMP_CLASS_METHOD || g_Mode == MODE_DUMP_CLASS_METHOD_SIG)) break; } g_pImport->EnumClose(&hEnum); if(i) { printLine(g_pFile,""); printLine(g_pFile,COMMENT("// =============================================================")); printLine(g_pFile,""); } } void DumpVTables(IMAGE_COR20_HEADER *CORHeader, void* GUICookie) { IMAGE_COR_VTABLEFIXUP *pFixup,*pDummy; DWORD iCount; DWORD i; USHORT iSlot; char* szStr = &szString[0]; if (VAL32(CORHeader->VTableFixups.VirtualAddress) == 0) return; sprintf_s(szString,SZSTRING_SIZE,"// VTableFixup Directory:"); printLine(GUICookie,szStr); // Pull back a pointer to it. iCount = VAL32(CORHeader->VTableFixups.Size) / sizeof(IMAGE_COR_VTABLEFIXUP); if ((g_pPELoader->getVAforRVA(VAL32(CORHeader->VTableFixups.VirtualAddress), (void **) &pFixup) == FALSE) ||(g_pPELoader->getVAforRVA(VAL32(CORHeader->VTableFixups.VirtualAddress)+VAL32(CORHeader->VTableFixups.Size)-1, (void **) &pDummy) == FALSE)) { printLine(GUICookie,RstrUTF(IDS_E_VTFUTABLE)); goto exit; } // Walk every v-table fixup entry and dump the slots. for (i=0; i<iCount; i++) { sprintf_s(szString,SZSTRING_SIZE,"// IMAGE_COR_VTABLEFIXUP[%d]:", i); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// RVA: 0x%08x", VAL32(pFixup->RVA)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Count: 0x%04x", VAL16(pFixup->Count)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Type: 0x%04x", VAL16(pFixup->Type)); printLine(GUICookie,szStr); BYTE *pSlot; if (g_pPELoader->getVAforRVA(VAL32(pFixup->RVA), (void **) &pSlot) == FALSE) { printLine(GUICookie,RstrUTF(IDS_E_BOGUSRVA)); goto NextEntry; } for (iSlot=0; iSlot<pFixup->Count; iSlot++) { mdMethodDef tkMethod = VAL32(*(DWORD *) pSlot); if (pFixup->Type & VAL16(COR_VTABLE_32BIT)) { sprintf_s(szString,SZSTRING_SIZE,"// [0x%04x] (0x%08x)", iSlot, tkMethod); pSlot += sizeof(DWORD); } else { sprintf_s(szString,SZSTRING_SIZE,"// [0x%04x] (0x%16llx)", iSlot, VAL64(*(unsigned __int64 *) pSlot)); pSlot += sizeof(unsigned __int64); } printLine(GUICookie,szStr); ValidateToken(tkMethod, mdtMethodDef); } // Pointer to next fixup entry. NextEntry: ++pFixup; } exit: printLine(GUICookie,""); } void DumpEATTable(IMAGE_COR20_HEADER *CORHeader, void* GUICookie) { BYTE *pFixup,*pDummy; DWORD iCount; DWORD BufferRVA; DWORD i; char* szStr = &szString[0]; sprintf_s(szString,SZSTRING_SIZE,"// Export Address Table Jumps:"); printLine(GUICookie,szStr); if (VAL32(CORHeader->ExportAddressTableJumps.VirtualAddress) == 0) { printLine(GUICookie,RstrUTF(IDS_E_NODATA)); return; } // Pull back a pointer to it. iCount = VAL32(CORHeader->ExportAddressTableJumps.Size) / IMAGE_COR_EATJ_THUNK_SIZE; if ((g_pPELoader->getVAforRVA(VAL32(CORHeader->ExportAddressTableJumps.VirtualAddress), (void **) &pFixup) == FALSE) ||(g_pPELoader->getVAforRVA(VAL32(CORHeader->ExportAddressTableJumps.VirtualAddress)+VAL32(CORHeader->ExportAddressTableJumps.Size)-1, (void **) &pDummy) == FALSE)) { printLine(GUICookie,RstrUTF(IDS_E_EATJTABLE)); goto exit; } // Quick sanity check on the linker. if (VAL32(CORHeader->ExportAddressTableJumps.Size) % IMAGE_COR_EATJ_THUNK_SIZE) { sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_EATJSIZE), VAL32(CORHeader->ExportAddressTableJumps.Size), IMAGE_COR_EATJ_THUNK_SIZE); printLine(GUICookie,szStr); } // Walk every v-table fixup entry and dump the slots. 
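// (Despite the wording of the comment above, this loop walks the export address table jump thunks, not v-table fixup slots.)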
BufferRVA = VAL32(CORHeader->ExportAddressTableJumps.VirtualAddress); for (i=0; i<iCount; i++) { ULONG ReservedFlag = VAL32(*(ULONG *) (pFixup + sizeof(ULONG))); sprintf_s(szString,SZSTRING_SIZE,"// Fixup Jump Entry [%d], at RVA 0x%08x:", i, BufferRVA); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// RVA of slot: 0x%08x", VAL32(*(ULONG *) pFixup)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Reserved flag: 0x%08x", ReservedFlag); printLine(GUICookie,szStr); if (ReservedFlag != 0) { printLine(GUICookie,RstrUTF(IDS_E_RESFLAGS)); } pFixup += IMAGE_COR_EATJ_THUNK_SIZE; BufferRVA += IMAGE_COR_EATJ_THUNK_SIZE; } exit: printLine(GUICookie,""); } void DumpCodeManager(IMAGE_COR20_HEADER *CORHeader, void* GUICookie) { char* szStr = &szString[0]; sprintf_s(szString,SZSTRING_SIZE,"// Code Manager Table:"); printLine(GUICookie,szStr); if (!VAL32(CORHeader->CodeManagerTable.Size)) { sprintf_s(szString,SZSTRING_SIZE,"// default"); printLine(GUICookie,szStr); return; } const GUID *pcm; if (g_pPELoader->getVAforRVA(VAL32(CORHeader->CodeManagerTable.VirtualAddress), (void **) &pcm) == FALSE) { printLine(GUICookie,RstrUTF(IDS_E_CODEMGRTBL)); return; } sprintf_s(szString,SZSTRING_SIZE,"// [index] ID"); printLine(GUICookie,szStr); ULONG iCount = VAL32(CORHeader->CodeManagerTable.Size) / sizeof(GUID); for (ULONG i=0; i<iCount; i++) { WCHAR rcguid[128]; GUID Guid = *pcm; SwapGuid(&Guid); StringFromGUID2(Guid, rcguid, ARRAY_SIZE(rcguid)); sprintf_s(szString,SZSTRING_SIZE,"// [0x%08x] %S", i, rcguid); printLine(GUICookie,szStr); pcm++; } printLine(GUICookie,""); } void DumpSectionHeaders(IMAGE_SECTION_HEADER* pSH, USHORT nSH, void* GUICookie) { char* szStr = &szString[0]; char name[16]; printLine(GUICookie,""); strcpy_s(szString,SZSTRING_SIZE,"// Image sections:"); printLine(GUICookie,szStr); for(USHORT iSH=0; iSH < nSH; iSH++,pSH++) { strncpy_s(name,16,(const char*)(pSH->Name),8); name[8]=0; sprintf_s(szString,SZSTRING_SIZE,"// %s",name); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Virtual Size", pSH->Misc.VirtualSize); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Virtual Address", pSH->VirtualAddress); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Size of Raw Data", pSH->SizeOfRawData); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Pointer to Raw Data", pSH->PointerToRawData); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Pointer to Relocations", pSH->PointerToRelocations); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Pointer to Linenumbers", pSH->PointerToLinenumbers); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%04x Number of Relocations", pSH->NumberOfRelocations); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%04x Number of Linenumbers", pSH->NumberOfLinenumbers); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Characteristics", pSH->Characteristics); printLine(GUICookie,szStr); if((pSH->Characteristics & IMAGE_SCN_SCALE_INDEX)) { strcpy_s(szString,SZSTRING_SIZE,"// SCALE_INDEX"); printLine(GUICookie,szStr); } if((pSH->Characteristics & IMAGE_SCN_CNT_CODE)) { strcpy_s(szString,SZSTRING_SIZE,"// CNT_CODE"); printLine(GUICookie,szStr); } if((pSH->Characteristics & IMAGE_SCN_CNT_INITIALIZED_DATA)) { strcpy_s(szString,SZSTRING_SIZE,"// CNT_INITIALIZED_DATA"); printLine(GUICookie,szStr); } if((pSH->Characteristics & IMAGE_SCN_CNT_UNINITIALIZED_DATA)) { 
strcpy_s(szString,SZSTRING_SIZE,"// CNT_UNINITIALIZED_DATA"); printLine(GUICookie,szStr); } if((pSH->Characteristics & IMAGE_SCN_NO_DEFER_SPEC_EXC)) { strcpy_s(szString,SZSTRING_SIZE,"// NO_DEFER_SPEC_EXC"); printLine(GUICookie,szStr); } if((pSH->Characteristics & IMAGE_SCN_LNK_NRELOC_OVFL)) { strcpy_s(szString,SZSTRING_SIZE,"// LNK_NRELOC_OVFL"); printLine(GUICookie,szStr); } if((pSH->Characteristics & IMAGE_SCN_MEM_DISCARDABLE)) { strcpy_s(szString,SZSTRING_SIZE,"// MEM_DISCARDABLE"); printLine(GUICookie,szStr); } if((pSH->Characteristics & IMAGE_SCN_MEM_NOT_CACHED)) { strcpy_s(szString,SZSTRING_SIZE,"// MEM_NOT_CACHED"); printLine(GUICookie,szStr); } if((pSH->Characteristics & IMAGE_SCN_MEM_NOT_PAGED)) { strcpy_s(szString,SZSTRING_SIZE,"// MEM_NOT_PAGED"); printLine(GUICookie,szStr); } if((pSH->Characteristics & IMAGE_SCN_MEM_SHARED)) { strcpy_s(szString,SZSTRING_SIZE,"// MEM_SHARED"); printLine(GUICookie,szStr); } if((pSH->Characteristics & IMAGE_SCN_MEM_EXECUTE)) { strcpy_s(szString,SZSTRING_SIZE,"// MEM_EXECUTE"); printLine(GUICookie,szStr); } if((pSH->Characteristics & IMAGE_SCN_MEM_READ)) { strcpy_s(szString,SZSTRING_SIZE,"// MEM_READ"); printLine(GUICookie,szStr); } if((pSH->Characteristics & IMAGE_SCN_MEM_WRITE)) { strcpy_s(szString,SZSTRING_SIZE,"// MEM_WRITE"); printLine(GUICookie,szStr); } printLine(GUICookie,""); } } void DumpBaseReloc(const char *szName, IMAGE_DATA_DIRECTORY *pDir, void* GUICookie) { char* szStr = &szString[0]; sprintf_s(szString,SZSTRING_SIZE,"// %s", szName); printLine(GUICookie,szStr); if (!VAL32(pDir->Size)) { printLine(GUICookie,RstrUTF(IDS_E_NODATA)); return; } char *pBegin, *pEnd; DWORD *pdw, i, Nentries; WORD *pw; if (g_pPELoader->getVAforRVA(VAL32(pDir->VirtualAddress), (void **) &pBegin) == FALSE) { printLine(GUICookie,RstrUTF(IDS_E_IMPORTDATA)); return; } pEnd = pBegin + VAL32(pDir->Size); for(pdw = (DWORD*)pBegin; pdw < (DWORD*)pEnd; ) { sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Page RVA", *pdw); printLine(GUICookie,szStr); pdw++; sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Block Size", *pdw); printLine(GUICookie,szStr); Nentries = (*pdw - 2*sizeof(DWORD)) / sizeof(WORD); pdw++; sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Number of Entries", Nentries); printLine(GUICookie,szStr); for(i = 1, pw = (WORD*)pdw; i <= Nentries; i++, pw++) { sprintf_s(szString,SZSTRING_SIZE,"// Entry %d: Type 0x%x Offset 0x%08x", i, ((*pw)>>12), ((*pw)&0x0FFF)); printLine(GUICookie,szStr); } if((Nentries & 1)) pw++; // to make pdw DWORD-aligned pdw = (DWORD*)pw; printLine(GUICookie,""); } } void DumpIAT(const char *szName, IMAGE_DATA_DIRECTORY *pDir, void* GUICookie) { char* szStr = &szString[0]; sprintf_s(szString,SZSTRING_SIZE,"// %s", szName); printLine(GUICookie,szStr); if (!VAL32(pDir->Size)) { printLine(GUICookie,RstrUTF(IDS_E_NODATA)); return; } const char *szDLLName; const IMAGE_IMPORT_DESCRIPTOR *pImportDesc; if (g_pPELoader->getVAforRVA(VAL32(pDir->VirtualAddress), (void **) &pImportDesc) == FALSE) { printLine(GUICookie,RstrUTF(IDS_E_IMPORTDATA)); return; } const DWORD *pImportTableID; while (VAL32(pImportDesc->FirstThunk)) { if (g_pPELoader->getVAforRVA(VAL32(pImportDesc->Name), (void **) &szDLLName) == FALSE || g_pPELoader->getVAforRVA(VAL32(pImportDesc->FirstThunk), (void **) &pImportTableID) == FALSE) { printLine(GUICookie,RstrUTF(IDS_E_IMPORTDATA)); return; } sprintf_s(szString,SZSTRING_SIZE,"// DLL : %s", szDLLName); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Import Address Table", VAL32(pImportDesc->FirstThunk)); 
printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Import Name Table", VAL32(pImportDesc->Name)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// %-8d Time Date Stamp", VAL32(pImportDesc->TimeDateStamp)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// %-8d Index of First Forwarder Reference", VAL32(pImportDesc->ForwarderChain)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"//"); printLine(GUICookie,szStr); for ( ; VAL32(*pImportTableID); pImportTableID++) { if (VAL32(*pImportTableID) & 0x80000000) sprintf_s(szString,SZSTRING_SIZE,"// by Ordinal %d", VAL32(*pImportTableID) & 0x7fffffff); else { const IMAGE_IMPORT_BY_NAME *pName; if(g_pPELoader->getVAforRVA(VAL32(*pImportTableID) & 0x7fffffff, (void **) &pName)) sprintf_s(szString,SZSTRING_SIZE,"// 0x%04x %s", VAL16(pName->Hint), pName->Name); else sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x bad RVA of IMAGE_IMPORT_BY_NAME", VAL32(*pImportTableID)); } printLine(GUICookie,szStr); } printLine(GUICookie,""); // Next import descriptor. pImportDesc++; } } struct MDStreamHeader { DWORD Reserved; BYTE Major; BYTE Minor; BYTE Heaps; BYTE Rid; ULONGLONG MaskValid; ULONGLONG Sorted; }; void DumpMetadataHeader(const char *szName, IMAGE_DATA_DIRECTORY *pDir, void* GUICookie) { char* szStr = &szString[0]; printLine(GUICookie,""); sprintf_s(szString,SZSTRING_SIZE,"// %s", szName); printLine(GUICookie,szStr); if (!VAL32(pDir->Size)) { printLine(GUICookie,RstrUTF(IDS_E_NODATA)); return; } const STORAGESIGNATURE *pSSig; char verstr[1024]; if (g_pPELoader->getVAforRVA(VAL32(pDir->VirtualAddress), (void **) &pSSig) == FALSE) { printLine(GUICookie,RstrUTF(IDS_E_IMPORTDATA)); return; } strcpy_s(szString,SZSTRING_SIZE,"// Storage Signature:"); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Signature", VAL32(pSSig->lSignature)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%04x Major Version", VAL16(pSSig->iMajorVer)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%04x Minor Version", VAL16(pSSig->iMinorVer)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Extra Data Offset", VAL32(pSSig->iExtraData)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Version String Length", VAL32(pSSig->iVersionString)); printLine(GUICookie,szStr); memset(verstr,0,1024); strncpy_s(verstr,1024,(const char*)(pSSig->pVersion),VAL32(pSSig->iVersionString)); sprintf_s(szString,SZSTRING_SIZE,"// '%s' Version String", verstr); printLine(GUICookie,szStr); size_t pb = (size_t)pSSig; pb += (3*sizeof(DWORD)+2*sizeof(WORD)+VAL32(pSSig->iVersionString)+3)&~3; PSTORAGEHEADER pSHdr = (PSTORAGEHEADER)pb; strcpy_s(szString,SZSTRING_SIZE,"// Storage Header:"); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%02x Flags", pSHdr->fFlags); printLine(GUICookie,szStr); short nStr = VAL16(pSHdr->iStreams); sprintf_s(szString,SZSTRING_SIZE,"// 0x%04x Number of Streams", nStr); if(nStr > 5) { strcat_s(szString, SZSTRING_SIZE, " -- BOGUS!"); nStr = 5; } printLine(GUICookie,szStr); PSTORAGESTREAM pStr = (PSTORAGESTREAM)(pSHdr+1); BYTE* pbMDstream = NULL; size_t cbMDstream = 0; for(short iStr = 1; iStr <= nStr; iStr++) { sprintf_s(szString,SZSTRING_SIZE,"// Stream %d:",iStr); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Offset", VAL32(pStr->iOffset)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Size", VAL32(pStr->iSize)); printLine(GUICookie,szStr); 
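// Next the stream name is printed; the "#~" (compressed) or "#-" (uncompressed) metadata table stream is remembered so its header can be dumped after this loop.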
sprintf_s(szString,SZSTRING_SIZE,"// '%s' Name", pStr->rcName); printLine(GUICookie,szStr); if((strcmp(pStr->rcName,"#-")==0)||(strcmp(pStr->rcName,"#~")==0)) { pbMDstream = (BYTE*)pSSig + VAL32(pStr->iOffset); cbMDstream = VAL32(pStr->iSize); } pb = (size_t)pStr; pb += (2*sizeof(DWORD)+strlen(pStr->rcName)+1+3)&~3; pStr = (PSTORAGESTREAM)pb; } if((pbMDstream)&&(cbMDstream >= sizeof(MDStreamHeader))) { printLine(GUICookie,""); strcpy_s(szString,SZSTRING_SIZE,"// Metadata Stream Header:"); printLine(GUICookie,szStr); MDStreamHeader* pMDSH = (MDStreamHeader*)pbMDstream; sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Reserved", VAL32(pMDSH->Reserved)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%02x Major", pMDSH->Major); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%02x Minor", pMDSH->Minor); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%02x Heaps", pMDSH->Heaps); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%02x Rid", pMDSH->Rid); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%016I64x MaskValid", (ULONGLONG)GET_UNALIGNED_VAL64(&(pMDSH->MaskValid))); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%016I64x Sorted", (ULONGLONG)GET_UNALIGNED_VAL64(&(pMDSH->Sorted))); printLine(GUICookie,szStr); } } void DumpEntryPoint(DWORD dwAddrOfEntryPoint,DWORD dwEntryPointSize,void* GUICookie) { BYTE* pB; char* szStr = &szString[0]; char* szptr = szStr+2; DWORD i; printLine(GUICookie,""); strcpy_s(szString,SZSTRING_SIZE,"// Entry point code:"); printLine(GUICookie,szStr); if (g_pPELoader->getVAforRVA(dwAddrOfEntryPoint, (void **) &pB) == FALSE) { printLine(GUICookie,"Bad RVA of entry point"); return; } if(dwEntryPointSize == 48) pB -= 32; // on IA64, AddressOfEntryPoint points at PLabelDescriptor, not at the stub itself for(i=0; i<dwEntryPointSize; i++) { szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%2.2X ",pB[i]); } printLine(GUICookie,szStr); } #define DUMP_DIRECTORY(szName, Directory) \ sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x [0x%08x] address [size] of " szName, \ VAL32(Directory.VirtualAddress), VAL32(Directory.Size)); \ printLine(GUICookie,szStr) #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable:21000) // Suppress PREFast warning about overly large function #endif void DumpHeader(IMAGE_COR20_HEADER *CORHeader, void* GUICookie) { char* szStr = &szString[0]; DWORD dwAddrOfEntryPoint=0, dwEntryPointSize=0; PIMAGE_DOS_HEADER pDOSHeader = g_pPELoader->dosHeader(); strcpy_s(szString,SZSTRING_SIZE,"// ----- DOS Header:"); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Magic: 0x%04x", VAL16(pDOSHeader->e_magic)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Bytes on last page: 0x%04x", VAL16(pDOSHeader->e_cblp)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Pages in file: 0x%04x", VAL16(pDOSHeader->e_cp)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Relocations: 0x%04x", VAL16(pDOSHeader->e_crlc)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of header (paragraphs):0x%04x", VAL16(pDOSHeader->e_cparhdr)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Min extra paragraphs: 0x%04x", VAL16(pDOSHeader->e_minalloc)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Max extra paragraphs: 0x%04x", VAL16(pDOSHeader->e_maxalloc)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Initial (relative) SS: 0x%04x", 
VAL16(pDOSHeader->e_ss)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Initial SP: 0x%04x", VAL16(pDOSHeader->e_sp)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Checksum: 0x%04x", VAL16(pDOSHeader->e_csum)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Initial IP: 0x%04x", VAL16(pDOSHeader->e_ip)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Initial (relative) CS: 0x%04x", VAL16(pDOSHeader->e_ip)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// File addr. of reloc table: 0x%04x", VAL16(pDOSHeader->e_lfarlc)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Overlay number: 0x%04x", VAL16(pDOSHeader->e_ovno)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// OEM identifier: 0x%04x", VAL16(pDOSHeader->e_oemid)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// OEM info: 0x%04x", VAL16(pDOSHeader->e_oeminfo)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// File addr. of COFF header: 0x%04x", VAL16(pDOSHeader->e_lfanew)); printLine(GUICookie,szStr); strcpy_s(szString,SZSTRING_SIZE,"// ----- COFF/PE Headers:"); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Signature: 0x%08x", VAL32(g_pPELoader->Signature())); printLine(GUICookie,szStr); strcpy_s(szString,SZSTRING_SIZE,"// ----- COFF Header:"); printLine(GUICookie,szStr); PIMAGE_FILE_HEADER pCOFF = g_pPELoader->coffHeader(); sprintf_s(szString,SZSTRING_SIZE,"// Machine: 0x%04x", VAL16(pCOFF->Machine)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Number of sections: 0x%04x", VAL16(pCOFF->NumberOfSections)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Time-date stamp: 0x%08x", VAL32(pCOFF->TimeDateStamp)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Ptr to symbol table: 0x%08x", VAL32(pCOFF->PointerToSymbolTable)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Number of symbols: 0x%08x", VAL32(pCOFF->NumberOfSymbols)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of optional header: 0x%04x", VAL16(pCOFF->SizeOfOptionalHeader)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Characteristics: 0x%04x", VAL16(pCOFF->Characteristics)); printLine(GUICookie,szStr); if (g_pPELoader->IsPE32()) { IMAGE_NT_HEADERS32 *pNTHeader = g_pPELoader->ntHeaders32(); IMAGE_OPTIONAL_HEADER32 *pOptHeader = &pNTHeader->OptionalHeader; strcpy_s(szString,SZSTRING_SIZE,"// ----- PE Optional Header (32 bit):"); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Magic: 0x%04x", VAL16(pOptHeader->Magic)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Major linker version: 0x%02x", VAL16(pOptHeader->MajorLinkerVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Minor linker version: 0x%02x", VAL16(pOptHeader->MinorLinkerVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of code: 0x%08x", VAL32(pOptHeader->SizeOfCode)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of init.data: 0x%08x", VAL32(pOptHeader->SizeOfInitializedData)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of uninit.data: 0x%08x", VAL32(pOptHeader->SizeOfUninitializedData)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Addr. 
of entry point: 0x%08x", VAL32(pOptHeader->AddressOfEntryPoint)); printLine(GUICookie,szStr); dwAddrOfEntryPoint = VAL32(pOptHeader->AddressOfEntryPoint); dwEntryPointSize = 6; sprintf_s(szString,SZSTRING_SIZE,"// Base of code: 0x%08x", VAL32(pOptHeader->BaseOfCode)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Base of data: 0x%08x", VAL32(pOptHeader->BaseOfData)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Image base: 0x%08x", VAL32(pOptHeader->ImageBase)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Section alignment: 0x%08x", VAL32(pOptHeader->SectionAlignment)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// File alignment: 0x%08x", VAL32(pOptHeader->FileAlignment)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Major OS version: 0x%04x", VAL16(pOptHeader->MajorOperatingSystemVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Minor OS version: 0x%04x", VAL16(pOptHeader->MinorOperatingSystemVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Major image version: 0x%04x", VAL16(pOptHeader->MajorImageVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Minor image version: 0x%04x", VAL16(pOptHeader->MinorImageVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Major subsystem version: 0x%04x", VAL16(pOptHeader->MajorSubsystemVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Minor subsystem version: 0x%04x", VAL16(pOptHeader->MinorSubsystemVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of image: 0x%08x", VAL32(pOptHeader->SizeOfImage)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of headers: 0x%08x", VAL32(pOptHeader->SizeOfHeaders)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Checksum: 0x%08x", VAL32(pOptHeader->CheckSum)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Subsystem: 0x%04x", VAL16(pOptHeader->Subsystem)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// DLL characteristics: 0x%04x", VAL16(pOptHeader->DllCharacteristics)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of stack reserve: 0x%08x", VAL32(pOptHeader->SizeOfStackReserve)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of stack commit: 0x%08x", VAL32(pOptHeader->SizeOfStackCommit)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of heap reserve: 0x%08x", VAL32(pOptHeader->SizeOfHeapReserve)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of heap commit: 0x%08x", VAL32(pOptHeader->SizeOfHeapCommit)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Loader flags: 0x%08x", VAL32(pOptHeader->LoaderFlags)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Directories: 0x%08x", VAL32(pOptHeader->NumberOfRvaAndSizes)); printLine(GUICookie,szStr); DUMP_DIRECTORY("Export Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_EXPORT]); DUMP_DIRECTORY("Import Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT]); DUMP_DIRECTORY("Resource Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_RESOURCE]); DUMP_DIRECTORY("Exception Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_EXCEPTION]); DUMP_DIRECTORY("Security Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_SECURITY]); DUMP_DIRECTORY("Base Relocation Table: 
", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_BASERELOC]); DUMP_DIRECTORY("Debug Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_DEBUG]); DUMP_DIRECTORY("Architecture Specific: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_ARCHITECTURE]); DUMP_DIRECTORY("Global Pointer: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_GLOBALPTR]); DUMP_DIRECTORY("TLS Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_TLS]); DUMP_DIRECTORY("Load Config Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG]); DUMP_DIRECTORY("Bound Import Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT]); DUMP_DIRECTORY("Import Address Table: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_IAT]); DUMP_DIRECTORY("Delay Load IAT: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT]); DUMP_DIRECTORY("CLR Header: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR]); printLine(GUICookie,""); DumpSectionHeaders((IMAGE_SECTION_HEADER*)(pOptHeader+1),pNTHeader->FileHeader.NumberOfSections,GUICookie); DumpBaseReloc("Base Relocation Table",&pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_BASERELOC],GUICookie); DumpIAT("Import Address Table", &pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT],GUICookie); DumpIAT("Delay Load Import Address Table", &pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT],GUICookie); } else { IMAGE_NT_HEADERS64 *pNTHeader = g_pPELoader->ntHeaders64(); IMAGE_OPTIONAL_HEADER64 *pOptHeader = &pNTHeader->OptionalHeader; strcpy_s(szString,SZSTRING_SIZE,"// ----- PE Optional Header (64 bit):"); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Magic: 0x%04x", VAL16(pOptHeader->Magic)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Major linker version: 0x%02x", VAL16(pOptHeader->MajorLinkerVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Minor linker version: 0x%02x", VAL16(pOptHeader->MinorLinkerVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of code: 0x%08x", VAL32(pOptHeader->SizeOfCode)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of init.data: 0x%08x", VAL32(pOptHeader->SizeOfInitializedData)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of uninit.data: 0x%08x", VAL32(pOptHeader->SizeOfUninitializedData)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Addr. of entry point: 0x%08x", VAL32(pOptHeader->AddressOfEntryPoint)); printLine(GUICookie,szStr); dwAddrOfEntryPoint = VAL32(pOptHeader->AddressOfEntryPoint); dwEntryPointSize = (VAL16(pCOFF->Machine)==IMAGE_FILE_MACHINE_IA64) ? 
48 : 12; sprintf_s(szString,SZSTRING_SIZE,"// Base of code: 0x%08x", VAL32(pOptHeader->BaseOfCode)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Image base: 0x%016I64x", VAL64(pOptHeader->ImageBase)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Section alignment: 0x%08x", VAL32(pOptHeader->SectionAlignment)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// File alignment: 0x%08x", VAL32(pOptHeader->FileAlignment)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Major OS version: 0x%04x", VAL16(pOptHeader->MajorOperatingSystemVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Minor OS version: 0x%04x", VAL16(pOptHeader->MinorOperatingSystemVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Major image version: 0x%04x", VAL16(pOptHeader->MajorImageVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Minor image version: 0x%04x", VAL16(pOptHeader->MinorImageVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Major subsystem version: 0x%04x", VAL16(pOptHeader->MajorSubsystemVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Minor subsystem version: 0x%04x", VAL16(pOptHeader->MinorSubsystemVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of image: 0x%08x", VAL32(pOptHeader->SizeOfImage)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of headers: 0x%08x", VAL32(pOptHeader->SizeOfHeaders)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Checksum: 0x%08x", VAL32(pOptHeader->CheckSum)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Subsystem: 0x%04x", VAL16(pOptHeader->Subsystem)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// DLL characteristics: 0x%04x", VAL16(pOptHeader->DllCharacteristics)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of stack reserve: 0x%016I64x", VAL64(pOptHeader->SizeOfStackReserve)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of stack commit: 0x%016I64x", VAL64(pOptHeader->SizeOfStackCommit)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of heap reserve: 0x%016I64x", VAL64(pOptHeader->SizeOfHeapReserve)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of heap commit: 0x%016I64x", VAL64(pOptHeader->SizeOfHeapCommit)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Loader flags: 0x%08x", VAL32(pOptHeader->LoaderFlags)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Directories: 0x%08x", VAL32(pOptHeader->NumberOfRvaAndSizes)); printLine(GUICookie,szStr); DUMP_DIRECTORY("Export Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_EXPORT]); DUMP_DIRECTORY("Import Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT]); DUMP_DIRECTORY("Resource Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_RESOURCE]); DUMP_DIRECTORY("Exception Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_EXCEPTION]); DUMP_DIRECTORY("Security Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_SECURITY]); DUMP_DIRECTORY("Base Relocation Table: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_BASERELOC]); DUMP_DIRECTORY("Debug Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_DEBUG]); DUMP_DIRECTORY("Architecture Specific: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_ARCHITECTURE]); 
DUMP_DIRECTORY("Global Pointer: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_GLOBALPTR]); DUMP_DIRECTORY("TLS Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_TLS]); DUMP_DIRECTORY("Load Config Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG]); DUMP_DIRECTORY("Bound Import Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT]); DUMP_DIRECTORY("Import Address Table: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_IAT]); DUMP_DIRECTORY("Delay Load IAT: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT]); DUMP_DIRECTORY("CLR Header: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR]); printLine(GUICookie,""); DumpSectionHeaders((IMAGE_SECTION_HEADER*)(pOptHeader+1),pNTHeader->FileHeader.NumberOfSections,GUICookie); DumpBaseReloc("Base Relocation Table",&pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_BASERELOC],GUICookie); DumpIAT("Import Address Table", &pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT],GUICookie); DumpIAT("Delay Load Import Address Table", &pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT],GUICookie); } if(dwAddrOfEntryPoint != 0) DumpEntryPoint(dwAddrOfEntryPoint,dwEntryPointSize,GUICookie); printLine(GUICookie,""); printLine(GUICookie,""); if (!CORHeader) { printLine(GUICookie,RstrUTF(IDS_E_COMIMAGE)); return; } strcpy_s(szString,SZSTRING_SIZE,"// ----- CLR Header:"); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Header size: 0x%08x", VAL32(CORHeader->cb)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Major runtime version: 0x%04x", VAL16(CORHeader->MajorRuntimeVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Minor runtime version: 0x%04x", VAL16(CORHeader->MinorRuntimeVersion)); printLine(GUICookie,szStr); // Metadata DUMP_DIRECTORY("Metadata Directory: ", CORHeader->MetaData); sprintf_s(szString,SZSTRING_SIZE,"// Flags: 0x%08x", VAL32(CORHeader->Flags)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Entry point token: 0x%08x", VAL32(IMAGE_COR20_HEADER_FIELD(*CORHeader, EntryPointToken))); printLine(GUICookie,szStr); // Binding DUMP_DIRECTORY("Resources Directory: ", CORHeader->Resources); DUMP_DIRECTORY("Strong Name Signature: ", CORHeader->StrongNameSignature); DUMP_DIRECTORY("CodeManager Table: ", CORHeader->CodeManagerTable); // Fixups DUMP_DIRECTORY("VTableFixups Directory: ", CORHeader->VTableFixups); DUMP_DIRECTORY("Export Address Table: ", CORHeader->ExportAddressTableJumps); // Managed Native Code DUMP_DIRECTORY("Precompile Header: ", CORHeader->ManagedNativeHeader); DumpMetadataHeader("Metadata Header",&(CORHeader->MetaData),GUICookie); } #ifdef _PREFAST_ #pragma warning(pop) #endif void DumpHeaderDetails(IMAGE_COR20_HEADER *CORHeader, void* GUICookie) { printLine(GUICookie,""); DumpCodeManager(CORHeader,GUICookie); printLine(GUICookie,""); DumpVTables(CORHeader,GUICookie); printLine(GUICookie,""); DumpEATTable(CORHeader,GUICookie); printLine(GUICookie,""); } void WritePerfData(const char *KeyDesc, const char *KeyName, const char *UnitDesc, const char *UnitName, void* Value, BOOL IsInt) { DWORD BytesWritten; if(!g_fDumpToPerfWriter) return; if (!g_PerfDataFilePtr) { if((g_PerfDataFilePtr = WszCreateFile(W("c:\\temp\\perfdata.dat"), GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ, NULL, OPEN_ALWAYS, 0, NULL) ) == INVALID_HANDLE_VALUE) { printLine(NULL,"PefTimer::LogStoppedTime(): Unable to open the FullPath file. 
No performance data will be generated"); g_fDumpToPerfWriter = FALSE; return; } WriteFile(g_PerfDataFilePtr,"ExecTime=0\r\n",13,&BytesWritten,NULL); WriteFile(g_PerfDataFilePtr,"ExecUnit=bytes\r\n",17,&BytesWritten,NULL); WriteFile(g_PerfDataFilePtr,"ExecUnitDescr=File Size\r\n",26,&BytesWritten,NULL); WriteFile(g_PerfDataFilePtr,"ExeciDirection=False\r\n",23,&BytesWritten,NULL); } char ValueStr[10]; char TmpStr[201]; if (IsInt) { sprintf_s(ValueStr,10,"%d",(int)*(int*)Value); } else { sprintf_s(ValueStr,10,"%5.2f",(float)*(float*)Value); } sprintf_s(TmpStr, 201, "%s=%s\r\n", KeyName, ValueStr); WriteFile(g_PerfDataFilePtr, TmpStr, (DWORD)strlen(TmpStr), &BytesWritten, NULL); sprintf_s(TmpStr, 201, "%s Descr=%s\r\n", KeyName, KeyDesc); WriteFile(g_PerfDataFilePtr, TmpStr, (DWORD)strlen(TmpStr), &BytesWritten, NULL); sprintf_s(TmpStr, 201, "%s Unit=%s\r\n", KeyName, UnitName); WriteFile(g_PerfDataFilePtr, TmpStr, (DWORD)strlen(TmpStr), &BytesWritten, NULL); sprintf_s(TmpStr, 201, "%s Unit Descr=%s\r\n", KeyName, UnitDesc); WriteFile(g_PerfDataFilePtr, TmpStr, (DWORD)strlen(TmpStr), &BytesWritten, NULL); sprintf_s(TmpStr, 201, "%s IDirection=%s\r\n", KeyName, "False"); WriteFile(g_PerfDataFilePtr, TmpStr, (DWORD)strlen(TmpStr), &BytesWritten, NULL); } void WritePerfDataInt(const char *KeyDesc, const char *KeyName, const char *UnitDesc, const char *UnitName, int Value) { WritePerfData(KeyDesc,KeyName,UnitDesc,UnitName, (void*)&Value, TRUE); } void WritePerfDataFloat(const char *KeyDesc, const char *KeyName, const char *UnitDesc, const char *UnitName, float Value) { WritePerfData(KeyDesc,KeyName,UnitDesc,UnitName, (void*)&Value, FALSE); } IMetaDataTables *pITables = NULL; //ULONG sizeRec, count; //int size, size2; int metaSize = 0; __int64 fTableSeen; inline void TableSeen(unsigned long n) { fTableSeen |= (I64(1) << n); } inline int IsTableSeen(unsigned long n) { return (fTableSeen & (I64(1) << n)) ? 1 : 0;} inline void TableSeenReset() { fTableSeen = 0;} void DumpTable(unsigned long Table, const char *TableName, void* GUICookie) { char *szStr = &szString[0]; const char **ppTableName = 0; int size; ULONG sizeRec, count; // Record that this table has been seen. TableSeen(Table); // If no name passed in, get from table info. 
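// (When no name was supplied, ppTableName points at TableName so GetTableInfo can fill in the table's own name; otherwise it stays NULL and the caller-provided name is used.)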
if (!TableName) ppTableName = &TableName; pITables->GetTableInfo(Table, &sizeRec, &count, NULL, NULL, ppTableName); if(count > 0) { metaSize += size = count * sizeRec; WritePerfDataInt(TableName,TableName,"count","count",count); WritePerfDataInt(TableName,TableName,"bytes","bytes",size); sprintf_s(szString,SZSTRING_SIZE,"// %-14s- %4d (%d bytes)", TableName, count, size); printLine(GUICookie,szStr); } } #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable:21000) // Suppress PREFast warning about overly large function #endif void DumpStatistics(IMAGE_COR20_HEADER *CORHeader, void* GUICookie) { int fileSize, miscPESize, miscCOMPlusSize, methodHeaderSize, methodBodySize; int methodBodies, fatHeaders, tinyHeaders, deprecatedHeaders; int size, size2; int fatSections, smallSections; ULONG methodDefs; ULONG i; ULONG sizeRec, count; char buf[MAX_MEMBER_LENGTH]; char* szStr = &szString[0]; TableSeenReset(); metaSize = 0; sprintf_s(szString,SZSTRING_SIZE,"// File size : %d", fileSize = SafeGetFileSize(g_pPELoader->getHFile(), NULL)); printLine(GUICookie,szStr); WritePerfDataInt("FileSize","FileSize","standard byte","bytes",fileSize); if (g_pPELoader->IsPE32()) { size = VAL32(((IMAGE_DOS_HEADER*) g_pPELoader->getHModule())->e_lfanew) + sizeof(IMAGE_NT_HEADERS32) - sizeof(IMAGE_OPTIONAL_HEADER32) + VAL16(g_pPELoader->ntHeaders32()->FileHeader.SizeOfOptionalHeader) + VAL16(g_pPELoader->ntHeaders32()->FileHeader.NumberOfSections) * sizeof(IMAGE_SECTION_HEADER); size2 = (size + VAL32(g_pPELoader->ntHeaders32()->OptionalHeader.FileAlignment) - 1) & ~(VAL32(g_pPELoader->ntHeaders32()->OptionalHeader.FileAlignment) - 1); } else { size = VAL32(((IMAGE_DOS_HEADER*) g_pPELoader->getHModule())->e_lfanew) + sizeof(IMAGE_NT_HEADERS64) - sizeof(IMAGE_OPTIONAL_HEADER64) + VAL16(g_pPELoader->ntHeaders64()->FileHeader.SizeOfOptionalHeader) + VAL16(g_pPELoader->ntHeaders64()->FileHeader.NumberOfSections) * sizeof(IMAGE_SECTION_HEADER); size2 = (size + VAL32(g_pPELoader->ntHeaders64()->OptionalHeader.FileAlignment) - 1) & ~(VAL32(g_pPELoader->ntHeaders64()->OptionalHeader.FileAlignment) - 1); } DWORD sizeOfHeaders; if (g_pPELoader->IsPE32()) { sizeOfHeaders = VAL32(g_pPELoader->ntHeaders32()->OptionalHeader.SizeOfHeaders); WritePerfDataInt("PE header size", "PE header size", "standard byte", "bytes", sizeOfHeaders); WritePerfDataInt("PE header size used", "PE header size used", "standard byte", "bytes", size); WritePerfDataFloat("PE header size", "PE header size", "percentage", "percentage", (float)((sizeOfHeaders * 100) / fileSize)); sprintf_s(szString,SZSTRING_SIZE,"// PE header size : %d (%d used) (%5.2f%%)", sizeOfHeaders, size, (double) (sizeOfHeaders * 100) / fileSize); printLine(GUICookie,szStr); miscPESize = 0; for (i=0; i < VAL32(g_pPELoader->ntHeaders32()->OptionalHeader.NumberOfRvaAndSizes); ++i) { // Skip the CLR header. 
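// NOTE: the CLR header lives in data directory slot IMAGE_DIRECTORY_ENTRY_COMHEADER (14); the literal 15 below appears to skip the reserved slot instead, unlike the 64-bit branch, which uses the named constant.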
if (i != 15) miscPESize += (int) VAL32(g_pPELoader->ntHeaders32()->OptionalHeader.DataDirectory[i].Size); } } else { sizeOfHeaders = VAL32(g_pPELoader->ntHeaders64()->OptionalHeader.SizeOfHeaders); WritePerfDataInt("PE+ header size", "PE header size", "standard byte", "bytes", sizeOfHeaders); WritePerfDataInt("PE+ header size used", "PE header size used", "standard byte", "bytes", size); WritePerfDataFloat("PE+ header size", "PE header size", "percentage", "percentage", (float)((sizeOfHeaders * 100) / fileSize)); sprintf_s(szString,SZSTRING_SIZE,"// PE header size : %d (%d used) (%5.2f%%)", sizeOfHeaders, size, (double) (sizeOfHeaders * 100) / fileSize); printLine(GUICookie,szStr); miscPESize = 0; for (i=0; i < VAL32(g_pPELoader->ntHeaders64()->OptionalHeader.NumberOfRvaAndSizes); ++i) { // Skip the CLR header. if (i != IMAGE_DIRECTORY_ENTRY_COMHEADER) miscPESize += (int) VAL32(g_pPELoader->ntHeaders64()->OptionalHeader.DataDirectory[i].Size); } } WritePerfDataInt("PE additional info", "PE additional info", "standard byte", "bytes",miscPESize); WritePerfDataFloat("PE additional info", "PE additional info", "percentage", "percent", (float) ((miscPESize * 100) / fileSize)); sprintf_s(buf, MAX_MEMBER_LENGTH, "PE additional info : %d", miscPESize); sprintf_s(szString,SZSTRING_SIZE,"// %-40s (%5.2f%%)", buf, (double) (miscPESize * 100) / fileSize); printLine(GUICookie,szStr); WORD numberOfSections; if (g_pPELoader->IsPE32()) { numberOfSections = VAL16(g_pPELoader->ntHeaders32()->FileHeader.NumberOfSections); } else { numberOfSections = VAL16(g_pPELoader->ntHeaders64()->FileHeader.NumberOfSections); } WritePerfDataInt("Num.of PE sections", "Num.of PE sections", "Nbr of sections", "sections",numberOfSections); sprintf_s(szString,SZSTRING_SIZE,"// Num.of PE sections : %d", numberOfSections); printLine(GUICookie,szStr); WritePerfDataInt("CLR header size", "CLR header size", "byte", "bytes",VAL32(CORHeader->cb)); WritePerfDataFloat("CLR header size", "CLR header size", "percentage", "percent",(float) ((VAL32(CORHeader->cb) * 100) / fileSize)); sprintf_s(buf, MAX_MEMBER_LENGTH, "CLR header size : %d", VAL32(CORHeader->cb)); sprintf_s(szString,SZSTRING_SIZE,"// %-40s (%5.2f%%)", buf, (double) (VAL32(CORHeader->cb) * 100) / fileSize); printLine(GUICookie,szStr); DWORD dwMetaSize = g_cbMetaData; WritePerfDataInt("CLR meta-data size", "CLR meta-data size", "bytes", "bytes",dwMetaSize); WritePerfDataFloat("CLR meta-data size", "CLR meta-data size", "percentage", "percent",(float) ((dwMetaSize * 100) / fileSize)); sprintf_s(buf, MAX_MEMBER_LENGTH, "CLR meta-data size : %d", dwMetaSize); sprintf_s(szString,SZSTRING_SIZE,"// %-40s (%5.2f%%)", buf, (double) (dwMetaSize * 100) / fileSize); printLine(GUICookie,szStr); IMAGE_DATA_DIRECTORY *pFirst = &CORHeader->Resources; ULONG32 iCount = (ULONG32)((BYTE *) &CORHeader->ManagedNativeHeader - (BYTE *) &CORHeader->Resources) / sizeof(IMAGE_DATA_DIRECTORY) + 1; miscCOMPlusSize = 0; for (ULONG32 iDir=0; iDir<iCount; iDir++) { miscCOMPlusSize += VAL32(pFirst->Size); pFirst++; } WritePerfDataInt("CLR Additional info", "CLR Additional info", "bytes", "bytes",miscCOMPlusSize); WritePerfDataFloat("CLR Additional info", "CLR Additional info", "percentage", "percent",(float) ((miscCOMPlusSize * 100) / fileSize)); sprintf_s(buf, MAX_MEMBER_LENGTH, "CLR additional info : %d", miscCOMPlusSize); sprintf_s(szString,SZSTRING_SIZE,"// %-40s (%5.2f%%)", buf, (double) (miscCOMPlusSize * 100) / fileSize); printLine(GUICookie,szStr); // Go through each method def collecting some 
statistics. methodHeaderSize = methodBodySize = 0; methodBodies = fatHeaders = tinyHeaders = deprecatedHeaders = fatSections = smallSections = 0; methodDefs = g_pImport->GetCountWithTokenKind(mdtMethodDef); for (i=1; i <= methodDefs; ++i) { ULONG rva; DWORD flags; if (FAILED(g_pImport->GetMethodImplProps(TokenFromRid(i, mdtMethodDef), &rva, &flags))) { continue; } if ((rva != 0)&&(IsMiIL(flags) || IsMiOPTIL(flags))) // We don't handle native yet. { ++methodBodies; COR_ILMETHOD_FAT *pMethod = NULL; g_pPELoader->getVAforRVA(rva, (void **) &pMethod); if (pMethod->IsFat()) { ++fatHeaders; methodHeaderSize += pMethod->GetSize() * 4; methodBodySize += pMethod->GetCodeSize(); // Add in the additional sections. BYTE *sectsBegin = (BYTE *) (pMethod->GetCode() + pMethod->GetCodeSize()); const COR_ILMETHOD_SECT *pSect = pMethod->GetSect(); const COR_ILMETHOD_SECT *pOldSect; if (pSect != NULL) { // Keep skipping a pointer past each section. do { pOldSect = pSect; if (((COR_ILMETHOD_SECT_FAT *) pSect)->GetKind() & CorILMethod_Sect_FatFormat) { ++fatSections; pSect = (COR_ILMETHOD_SECT *)((BYTE *) pSect + ((COR_ILMETHOD_SECT_FAT *) pSect)->GetDataSize()); } else { ++smallSections; pSect = (COR_ILMETHOD_SECT *)((BYTE *) pSect + ((COR_ILMETHOD_SECT_SMALL *) pSect)->DataSize); } pSect = (COR_ILMETHOD_SECT *) (((UINT_PTR) pSect + 3) & ~3); } while (pOldSect->More()); // Add on the section sizes. methodHeaderSize += (int) ((BYTE *) pSect - sectsBegin); } } else if (((COR_ILMETHOD_TINY *) pMethod)->IsTiny()) { ++tinyHeaders; methodHeaderSize += sizeof(COR_ILMETHOD_TINY); methodBodySize += ((COR_ILMETHOD_TINY *) pMethod)->GetCodeSize(); } else { _ASSERTE(!"Unrecognized header type"); } } } WritePerfDataInt("CLR method headers", "CLR method headers", "bytes", "bytes",methodHeaderSize); WritePerfDataFloat("CLR method headers", "CLR method headers", "percentage", "percent",(float) ((methodHeaderSize * 100) / fileSize)); sprintf_s(buf, MAX_MEMBER_LENGTH, "CLR method headers : %d", methodHeaderSize); sprintf_s(szString,SZSTRING_SIZE,"// %-40s (%5.2f%%)", buf, (double) (methodHeaderSize * 100) / fileSize); printLine(GUICookie,szStr); WritePerfDataInt("Managed code", "Managed code", "bytes", "bytes",methodBodySize); WritePerfDataFloat("Managed code", "Managed code", "percentage", "percent",(float) ((methodBodySize * 100) / fileSize)); sprintf_s(buf, MAX_MEMBER_LENGTH, "Managed code : %d", methodBodySize); sprintf_s(szString,SZSTRING_SIZE,"// %-40s (%5.2f%%)", buf, (double) (methodBodySize * 100) / fileSize); printLine(GUICookie,szStr); if (g_pPELoader->IsPE32()) { DWORD sizeOfInitializedData = VAL32(g_pPELoader->ntHeaders32()->OptionalHeader.SizeOfInitializedData); WritePerfDataInt("Data", "Data", "bytes", "bytes",sizeOfInitializedData); WritePerfDataFloat("Data", "Data", "percentage", "percent",(float) ((sizeOfInitializedData * 100) / fileSize)); sprintf_s(buf, MAX_MEMBER_LENGTH, "Data : %d", sizeOfInitializedData); sprintf_s(szString,SZSTRING_SIZE,"// %-40s (%5.2f%%)", buf, (double) (sizeOfInitializedData * 100) / fileSize); printLine(GUICookie,szStr); size = fileSize - g_pPELoader->ntHeaders32()->OptionalHeader.SizeOfHeaders - miscPESize - CORHeader->cb - g_cbMetaData - miscCOMPlusSize - sizeOfInitializedData - methodHeaderSize - methodBodySize; } else { DWORD sizeOfInitializedData = VAL32(g_pPELoader->ntHeaders64()->OptionalHeader.SizeOfInitializedData); WritePerfDataInt("Data", "Data", "bytes", "bytes",sizeOfInitializedData); WritePerfDataFloat("Data", "Data", "percentage", "percent",(float) 
((sizeOfInitializedData * 100) / fileSize)); sprintf_s(buf, MAX_MEMBER_LENGTH, "Data : %d", sizeOfInitializedData); sprintf_s(szString,SZSTRING_SIZE,"// %-40s (%5.2f%%)", buf, (double) (sizeOfInitializedData * 100) / fileSize); printLine(GUICookie,szStr); size = fileSize - g_pPELoader->ntHeaders64()->OptionalHeader.SizeOfHeaders - miscPESize - CORHeader->cb - g_cbMetaData - miscCOMPlusSize - sizeOfInitializedData - methodHeaderSize - methodBodySize; } WritePerfDataInt("Unaccounted", "Unaccounted", "bytes", "bytes",size); WritePerfDataFloat("Unaccounted", "Unaccounted", "percentage", "percent",(float) ((size * 100) / fileSize)); sprintf_s(buf, MAX_MEMBER_LENGTH, "Unaccounted : %d", size); sprintf_s(szString,SZSTRING_SIZE,"// %-40s (%5.2f%%)", buf, (double) (size * 100) / fileSize); printLine(GUICookie,szStr); // Detail... if (g_pPELoader->IsPE32()) { numberOfSections = VAL16(g_pPELoader->ntHeaders32()->FileHeader.NumberOfSections); WritePerfDataInt("Num.of PE sections", "Num.of PE sections", "bytes", "bytes",numberOfSections); printLine(GUICookie,""); sprintf_s(szString,SZSTRING_SIZE,"// Num.of PE sections : %d", numberOfSections); printLine(GUICookie,szStr); IMAGE_SECTION_HEADER *pSecHdr = IMAGE_FIRST_SECTION(g_pPELoader->ntHeaders32()); for (i=0; i < numberOfSections; ++i) { WritePerfDataInt((char*)pSecHdr->Name,(char*)pSecHdr->Name, "bytes", "bytes",VAL32(pSecHdr->SizeOfRawData)); sprintf_s(szString,SZSTRING_SIZE,"// %-8s - %d", pSecHdr->Name, VAL32(pSecHdr->SizeOfRawData)); printLine(GUICookie,szStr); ++pSecHdr; } } else { numberOfSections = VAL16(g_pPELoader->ntHeaders64()->FileHeader.NumberOfSections); WritePerfDataInt("Num.of PE sections", "Num.of PE sections", "bytes", "bytes",numberOfSections); printLine(GUICookie,""); sprintf_s(szString,SZSTRING_SIZE,"// Num.of PE sections : %d", numberOfSections); printLine(GUICookie,szStr); IMAGE_SECTION_HEADER *pSecHdr = IMAGE_FIRST_SECTION(g_pPELoader->ntHeaders64()); for (i=0; i < numberOfSections; ++i) { WritePerfDataInt((char*)pSecHdr->Name,(char*)pSecHdr->Name, "bytes", "bytes",pSecHdr->SizeOfRawData); sprintf_s(szString,SZSTRING_SIZE,"// %-8s - %d", pSecHdr->Name, pSecHdr->SizeOfRawData); printLine(GUICookie,szStr); ++pSecHdr; } } if (FAILED(g_pPubImport->QueryInterface(IID_IMetaDataTables, (void**)&pITables))) { sprintf_s(szString,SZSTRING_SIZE,"// Unable to get IMetaDataTables interface"); printLine(GUICookie,szStr); return; } if (pITables == 0) { printLine(GUICookie,RstrUTF(IDS_E_MDDETAILS)); return; } else { DWORD Size = g_cbMetaData; WritePerfDataInt("CLR meta-data size", "CLR meta-data size", "bytes", "bytes",Size); printLine(GUICookie,""); sprintf_s(szString,SZSTRING_SIZE,"// CLR meta-data size : %d", Size); printLine(GUICookie,szStr); metaSize = 0; pITables->GetTableInfo(TBL_Module, &sizeRec, &count, NULL, NULL, NULL); TableSeen(TBL_Module); metaSize += size = count * sizeRec; \ WritePerfDataInt("Module (count)", "Module (count)", "count", "count",count); WritePerfDataInt("Module (bytes)", "Module (bytes)", "bytes", "bytes",size); sprintf_s(szString,SZSTRING_SIZE,"// %-14s- %4d (%d bytes)", "Module", count, size); \ printLine(GUICookie,szStr); if ((count = g_pImport->GetCountWithTokenKind(mdtTypeDef)) > 0) { int flags, interfaces = 0, explicitLayout = 0; for (i=1; i <= count; ++i) { if (FAILED(g_pImport->GetTypeDefProps(TokenFromRid(i, mdtTypeDef), (ULONG *) &flags, NULL))) { continue; } if (flags & tdInterface) ++interfaces; if (flags & tdExplicitLayout) ++explicitLayout; } // Get count from table -- count reported by GetCount... 
doesn't include the "global" typedef. pITables->GetTableInfo(TBL_TypeDef, &sizeRec, &count, NULL, NULL, NULL); TableSeen(TBL_TypeDef); metaSize += size = count * sizeRec; WritePerfDataInt("TypeDef (count)", "TypeDef (count)", "count", "count", count); WritePerfDataInt("TypeDef (bytes)", "TypeDef (bytes)", "bytes", "bytes", size); WritePerfDataInt("interfaces", "interfaces", "count", "count", interfaces); WritePerfDataInt("explicitLayout", "explicitLayout", "count", "count", explicitLayout); sprintf_s(buf, MAX_MEMBER_LENGTH, " TypeDef - %4d (%d bytes)", count, size); sprintf_s(szString,SZSTRING_SIZE,"// %-38s %d interfaces, %d explicit layout", buf, interfaces, explicitLayout); printLine(GUICookie,szStr); } } pITables->GetTableInfo(TBL_TypeRef, &sizeRec, &count, NULL, NULL, NULL); TableSeen(TBL_TypeRef); if (count > 0) { metaSize += size = count * sizeRec; \ WritePerfDataInt("TypeRef (count)", "TypeRef (count)", "count", "count", count); WritePerfDataInt("TypeRef (bytes)", "TypeRef (bytes)", "bytes", "bytes", size); sprintf_s(szString,SZSTRING_SIZE,"// %-14s- %4d (%d bytes)", "TypeRef", count, size); \ printLine(GUICookie,szStr); } if ((count = g_pImport->GetCountWithTokenKind(mdtMethodDef)) > 0) { int flags, abstract = 0, native = 0; for (i=1; i <= count; ++i) { if (FAILED(g_pImport->GetMethodDefProps(TokenFromRid(i, mdtMethodDef), (DWORD *)&flags))) { sprintf_s(szString, SZSTRING_SIZE, "// Invalid MethodDef %08X record", TokenFromRid(i, mdtMethodDef)); printLine(GUICookie, szStr); return; } if (flags & mdAbstract) ++abstract; } pITables->GetTableInfo(TBL_Method, &sizeRec, NULL, NULL, NULL, NULL); TableSeen(TBL_Method); if (count > 0) { metaSize += size = count * sizeRec; WritePerfDataInt("MethodDef (count)", "MethodDef (count)", "count", "count", count); WritePerfDataInt("MethodDef (bytes)", "MethodDef (bytes)", "bytes", "bytes", size); WritePerfDataInt("abstract", "abstract", "count", "count", abstract); WritePerfDataInt("native", "native", "count", "count", native); WritePerfDataInt("methodBodies", "methodBodies", "count", "count", methodBodies); sprintf_s(buf, MAX_MEMBER_LENGTH, " MethodDef - %4d (%d bytes)", count, size); sprintf_s(szString,SZSTRING_SIZE,"// %-38s %d abstract, %d native, %d bodies", buf, abstract, native, methodBodies); printLine(GUICookie,szStr); } } if ((count = g_pImport->GetCountWithTokenKind(mdtFieldDef)) > 0) { int flags, constants = 0; for (i=1; i <= count; ++i) { if (FAILED(g_pImport->GetFieldDefProps(TokenFromRid(i, mdtFieldDef), (DWORD *)&flags))) { sprintf_s(szString, SZSTRING_SIZE, "// Invalid FieldDef %08X record", TokenFromRid(i, mdtFieldDef)); printLine(GUICookie, szStr); return; } if ((flags & (fdStatic|fdInitOnly)) == (fdStatic|fdInitOnly)) ++constants; } pITables->GetTableInfo(TBL_Field, &sizeRec, NULL, NULL, NULL, NULL); metaSize += size = count * sizeRec; WritePerfDataInt("FieldDef (count)", "FieldDef (count)", "count", "count", count); WritePerfDataInt("FieldDef (bytes)", "FieldDef (bytes)", "bytes", "bytes", size); WritePerfDataInt("constant", "constant", "count", "count", constants); sprintf_s(buf, MAX_MEMBER_LENGTH, " FieldDef - %4d (%d bytes)", count, size); sprintf_s(szString,SZSTRING_SIZE,"// %-38s %d constant", buf, constants); printLine(GUICookie,szStr); TableSeen(TBL_Field); } DumpTable(TBL_MemberRef, "MemberRef", GUICookie); DumpTable(TBL_Param, "ParamDef", GUICookie); DumpTable(TBL_MethodImpl, "MethodImpl", GUICookie); DumpTable(TBL_Constant, "Constant", GUICookie); DumpTable(TBL_CustomAttribute, "CustomAttribute", GUICookie); 
DumpTable(TBL_FieldMarshal, "NativeType", GUICookie); DumpTable(TBL_ClassLayout, "ClassLayout", GUICookie); DumpTable(TBL_FieldLayout, "FieldLayout", GUICookie); DumpTable(TBL_StandAloneSig, "StandAloneSig", GUICookie); DumpTable(TBL_InterfaceImpl, "InterfaceImpl", GUICookie); DumpTable(TBL_PropertyMap, "PropertyMap", GUICookie); DumpTable(TBL_Property, "Property", GUICookie); DumpTable(TBL_MethodSemantics, "MethodSemantic", GUICookie); DumpTable(TBL_DeclSecurity, "Security", GUICookie); DumpTable(TBL_TypeSpec, "TypeSpec", GUICookie); DumpTable(TBL_ModuleRef, "ModuleRef", GUICookie); DumpTable(TBL_Assembly, "Assembly", GUICookie); DumpTable(TBL_AssemblyProcessor, "AssemblyProcessor", GUICookie); DumpTable(TBL_AssemblyOS, "AssemblyOS", GUICookie); DumpTable(TBL_AssemblyRef, "AssemblyRef", GUICookie); DumpTable(TBL_AssemblyRefProcessor, "AssemblyRefProcessor", GUICookie); DumpTable(TBL_AssemblyRefOS, "AssemblyRefOS", GUICookie); DumpTable(TBL_File, "File", GUICookie); DumpTable(TBL_ExportedType, "ExportedType", GUICookie); DumpTable(TBL_ManifestResource, "ManifestResource", GUICookie); DumpTable(TBL_NestedClass, "NestedClass", GUICookie); // Rest of the tables. pITables->GetNumTables(&count); for (i=0; i<count; ++i) { if (!IsTableSeen(i)) DumpTable(i, NULL, GUICookie); } // String heap pITables->GetStringHeapSize(&sizeRec); if (sizeRec > 0) { metaSize += sizeRec; WritePerfDataInt("Strings", "Strings", "bytes", "bytes",sizeRec); sprintf_s(szString,SZSTRING_SIZE,"// Strings - %5d bytes", sizeRec); printLine(GUICookie,szStr); } // Blob heap pITables->GetBlobHeapSize(&sizeRec); if (sizeRec > 0) { metaSize += sizeRec; WritePerfDataInt("Blobs", "Blobs", "bytes", "bytes",sizeRec); sprintf_s(szString,SZSTRING_SIZE,"// Blobs - %5d bytes", sizeRec); printLine(GUICookie,szStr); } // User String Heap pITables->GetUserStringHeapSize(&sizeRec); if (sizeRec > 0) { metaSize += sizeRec; WritePerfDataInt("UserStrings", "UserStrings", "bytes", "bytes",sizeRec); sprintf_s(szString,SZSTRING_SIZE,"// UserStrings - %5d bytes", sizeRec); printLine(GUICookie,szStr); } // Guid heap pITables->GetGuidHeapSize(&sizeRec); if (sizeRec > 0) { metaSize += sizeRec; WritePerfDataInt("Guids", "Guids", "bytes", "bytes", sizeRec); sprintf_s(szString,SZSTRING_SIZE,"// Guids - %5d bytes", sizeRec); printLine(GUICookie,szStr); } if (g_cbMetaData - metaSize > 0) { WritePerfDataInt("Uncategorized", "Uncategorized", "bytes", "bytes",g_cbMetaData - metaSize); sprintf_s(szString,SZSTRING_SIZE,"// Uncategorized - %5d bytes", g_cbMetaData - metaSize); printLine(GUICookie,szStr); } if (miscCOMPlusSize != 0) { WritePerfDataInt("CLR additional info", "CLR additional info", "bytes", "bytes", miscCOMPlusSize); sprintf_s(szString,SZSTRING_SIZE,"// CLR additional info : %d", miscCOMPlusSize); printLine(GUICookie,""); printLine(GUICookie,szStr); if (CORHeader->CodeManagerTable.Size != 0) { WritePerfDataInt("CodeManagerTable", "CodeManagerTable", "bytes", "bytes", VAL32(CORHeader->CodeManagerTable.Size)); sprintf_s(szString,SZSTRING_SIZE,"// CodeManagerTable - %d", VAL32(CORHeader->CodeManagerTable.Size)); printLine(GUICookie,szStr); } if (CORHeader->VTableFixups.Size != 0) { WritePerfDataInt("VTableFixups", "VTableFixups", "bytes", "bytes", VAL32(CORHeader->VTableFixups.Size)); sprintf_s(szString,SZSTRING_SIZE,"// VTableFixups - %d", VAL32(CORHeader->VTableFixups.Size)); printLine(GUICookie,szStr); } if (CORHeader->Resources.Size != 0) { WritePerfDataInt("Resources", "Resources", "bytes", "bytes", VAL32(CORHeader->Resources.Size)); 
sprintf_s(szString,SZSTRING_SIZE,"// Resources - %d", VAL32(CORHeader->Resources.Size)); printLine(GUICookie,szStr); } } WritePerfDataInt("CLR method headers", "CLR method headers", "count", "count", methodHeaderSize); sprintf_s(szString,SZSTRING_SIZE,"// CLR method headers : %d", methodHeaderSize); printLine(GUICookie,""); printLine(GUICookie,szStr); WritePerfDataInt("Num.of method bodies", "Num.of method bodies", "count", "count",methodBodies); sprintf_s(szString,SZSTRING_SIZE,"// Num.of method bodies - %d", methodBodies); printLine(GUICookie,szStr); WritePerfDataInt("Num.of fat headers", "Num.of fat headers", "count", "count", fatHeaders); sprintf_s(szString,SZSTRING_SIZE,"// Num.of fat headers - %d", fatHeaders); printLine(GUICookie,szStr); WritePerfDataInt("Num.of tiny headers", "Num.of tiny headers", "count", "count", tinyHeaders); sprintf_s(szString,SZSTRING_SIZE,"// Num.of tiny headers - %d", tinyHeaders); printLine(GUICookie,szStr); if (deprecatedHeaders > 0) { WritePerfDataInt("Num.of old headers", "Num.of old headers", "count", "count", deprecatedHeaders); sprintf_s(szString,SZSTRING_SIZE,"// Num.of old headers - %d", deprecatedHeaders); printLine(GUICookie,szStr); } if (fatSections != 0 || smallSections != 0) { WritePerfDataInt("Num.of fat sections", "Num.of fat sections", "count", "count", fatSections); sprintf_s(szString,SZSTRING_SIZE,"// Num.of fat sections - %d", fatSections); printLine(GUICookie,szStr); WritePerfDataInt("Num.of small section", "Num.of small section", "count", "count", smallSections); sprintf_s(szString,SZSTRING_SIZE,"// Num.of small sections - %d", smallSections); printLine(GUICookie,szStr); } WritePerfDataInt("Managed code", "Managed code", "bytes", "bytes", methodBodySize); sprintf_s(szString,SZSTRING_SIZE,"// Managed code : %d", methodBodySize); printLine(GUICookie,""); printLine(GUICookie,szStr); if (methodBodies != 0) { WritePerfDataInt("Ave method size", "Ave method size", "bytes", "bytes", methodBodySize / methodBodies); sprintf_s(szString,SZSTRING_SIZE,"// Ave method size - %d", methodBodySize / methodBodies); printLine(GUICookie,szStr); } if (pITables) pITables->Release(); if(g_fDumpToPerfWriter) CloseHandle((char*) g_PerfDataFilePtr); } #ifdef _PREFAST_ #pragma warning(pop) #endif void DumpHexbytes(__inout __nullterminated char* szptr,BYTE *pb, DWORD fromPtr, DWORD toPtr, DWORD limPtr) { char sz[256]; int k = 0,i; DWORD curPtr = 0; bool printsz = FALSE; BYTE zero = 0; *szptr = 0; for(i = 0,k = 0,curPtr=fromPtr; curPtr < toPtr; i++,k++,curPtr++,pb++) { if(k == 16) { if(printsz) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT(" // %s"),sz); printLine(g_pFile,szString); szptr = &szString[0]; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s ",g_szAsmCodeIndent); k = 0; printsz = FALSE; } if(curPtr >= limPtr) pb = &zero; // at limPtr and after, pad with 0 else { PAL_CPP_TRY { sz[k] = *pb; // check the ptr validity } PAL_CPP_CATCH_ALL { pb = &zero; } PAL_CPP_ENDTRY; } szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," %2.2X", *pb); if(isprint(*pb)) { if(g_fDumpRTF) { if((*pb == '\\')||(*pb=='{')||(*pb=='}')) sz[k++]='\\'; sz[k] = *pb; } else if(g_fDumpHTML) { if(*pb == '<') { sz[k] = 0; strcat_s(sz,256-k,LTN()); k+=(int)(strlen(LTN())); } else if(*pb == '>') { sz[k] = 0; strcat_s(sz,256-k,GTN()); k+=(int)(strlen(GTN())); } } else sz[k] = *pb; printsz = TRUE; } else { sz[k] = '.'; } sz[k+1] = 0; } szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),") "); if(printsz) { for(i = k; i < 16; i++) 
szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," "); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("// %s"),sz); } printLine(g_pFile,szString); } struct VTableEntry { DWORD dwAddr; WORD wCount; WORD wType; }; struct ExpDirTable { DWORD dwFlags; DWORD dwDateTime; WORD wVMajor; WORD wVMinor; DWORD dwNameRVA; DWORD dwOrdinalBase; DWORD dwNumATEntries; DWORD dwNumNamePtrs; DWORD dwAddrTableRVA; DWORD dwNamePtrRVA; DWORD dwOrdTableRVA; }; void DumpEATEntries(void* GUICookie, IMAGE_NT_HEADERS32 *pNTHeader32, IMAGE_OPTIONAL_HEADER32 *pOptHeader32, IMAGE_NT_HEADERS64 *pNTHeader64, IMAGE_OPTIONAL_HEADER64 *pOptHeader64) { IMAGE_DATA_DIRECTORY *pExportDir = NULL; IMAGE_SECTION_HEADER *pSecHdr = NULL; DWORD i,j,N; BOOL bpOpt = FALSE; if (g_pPELoader->IsPE32()) { pExportDir = pOptHeader32->DataDirectory; pSecHdr = IMAGE_FIRST_SECTION(pNTHeader32); N = VAL16(pNTHeader32->FileHeader.NumberOfSections); if (pOptHeader32->NumberOfRvaAndSizes) bpOpt = TRUE; } else { pExportDir = pOptHeader64->DataDirectory; pSecHdr = IMAGE_FIRST_SECTION(pNTHeader64); N = VAL16(pNTHeader64->FileHeader.NumberOfSections); if (pOptHeader64->NumberOfRvaAndSizes) bpOpt = TRUE; } if(bpOpt) { ExpDirTable *pExpTable = NULL; if(pExportDir->Size) { #ifdef _DEBUG printLine(GUICookie,COMMENT((char*)0)); // start multiline comment sprintf_s(szString,SZSTRING_SIZE,"// Export dir VA=%X size=%X ",VAL32(pExportDir->VirtualAddress),VAL32(pExportDir->Size)); printLine(GUICookie,szString); #endif DWORD vaExpTable = VAL32(pExportDir->VirtualAddress); for (i=0; i < N; i++,pSecHdr++) { if((vaExpTable >= VAL32(pSecHdr->VirtualAddress))&& (vaExpTable < VAL32(pSecHdr->VirtualAddress)+VAL32(pSecHdr->Misc.VirtualSize))) { pExpTable = (ExpDirTable*)( g_pPELoader->base() + VAL32(pSecHdr->PointerToRawData) + vaExpTable - VAL32(pSecHdr->VirtualAddress)); #ifdef _DEBUG sprintf_s(szString,SZSTRING_SIZE,"// in section '%s': VA=%X Misc.VS=%X PRD=%X ",(char*)(pSecHdr->Name), VAL32(pSecHdr->VirtualAddress),VAL32(pSecHdr->Misc.VirtualSize),VAL32(pSecHdr->PointerToRawData)); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"// Export Directory Table:"); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"// dwFlags = %X",VAL32(pExpTable->dwFlags)); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"// dwDateTime = %X",VAL32(pExpTable->dwDateTime)); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"// wVMajor = %X",VAL16(pExpTable->wVMajor)); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"// wVMinor = %X",VAL16(pExpTable->wVMinor)); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"// dwNameRVA = %X",VAL32(pExpTable->dwNameRVA)); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"// dwOrdinalBase = %X",VAL32(pExpTable->dwOrdinalBase)); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"// dwNumATEntries = %X",VAL32(pExpTable->dwNumATEntries)); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"// dwNumNamePtrs = %X",VAL32(pExpTable->dwNumNamePtrs)); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"// dwAddrTableRVA = %X",VAL32(pExpTable->dwAddrTableRVA)); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"// dwNamePtrRVA = %X",VAL32(pExpTable->dwNamePtrRVA)); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"// dwOrdTableRVA = %X",VAL32(pExpTable->dwOrdTableRVA)); printLine(GUICookie,szString); if(pExpTable->dwNameRVA) { char* szName; 
if(g_pPELoader->getVAforRVA(VAL32(pExpTable->dwNameRVA), (void **) &szName)) sprintf_s(szString,SZSTRING_SIZE,"// DLL Name: '%s'",szName); else sprintf_s(szString,SZSTRING_SIZE,"// DLL Name: BAD RVA: 0x%8.8X",VAL32(pExpTable->dwNameRVA)); printLine(GUICookie,szString); } #endif if(pExpTable->dwNumATEntries && pExpTable->dwAddrTableRVA) { DWORD* pExpAddr = NULL; BYTE *pCont = NULL; DWORD dwTokRVA; mdToken* pTok; g_pPELoader->getVAforRVA(VAL32(pExpTable->dwAddrTableRVA), (void **) &pExpAddr); #ifdef _DEBUG sprintf_s(szString,SZSTRING_SIZE,"// Export Address Table:"); printLine(GUICookie,szString); #endif g_nEATableRef = VAL32(pExpTable->dwNumATEntries); if (g_prEATableRef == NULL) { g_prEATableRef = new DynamicArray<EATableRef>; } (*g_prEATableRef)[g_nEATableRef].tkTok = 0; // to avoid multiple reallocations of DynamicArray for(j=0; j < VAL32(pExpTable->dwNumATEntries); j++,pExpAddr++) { g_pPELoader->getVAforRVA(VAL32(*pExpAddr), (void **) &pCont); #ifdef _DEBUG sprintf_s(szString,SZSTRING_SIZE,"// [%d]: RVA=%X VA=%p(",j,VAL32(*pExpAddr),pCont); DumpByteArray(szString,pCont,16,GUICookie); printLine(GUICookie,szString); #endif (*g_prEATableRef)[j].tkTok = 0; if(g_pPELoader->IsPE32()) { dwTokRVA = VAL32(*((DWORD*)(pCont+2))); // first two bytes - JumpIndirect (0x25FF) dwTokRVA -= VAL32((DWORD)pOptHeader32->ImageBase); } else { ULONGLONG ullTokRVA; if(pNTHeader64->FileHeader.Machine == IMAGE_FILE_MACHINE_IA64) ullTokRVA = VAL64(*((ULONGLONG*)(pCont+8))); else ullTokRVA = VAL64(*((ULONGLONG*)(pCont+2))); dwTokRVA =(DWORD)(ullTokRVA - VAL64((DWORD)pOptHeader64->ImageBase)); } if(g_pPELoader->getVAforRVA(dwTokRVA,(void**)&pTok)) (*g_prEATableRef)[j].tkTok = VAL32(*pTok); (*g_prEATableRef)[j].pszName = NULL; } } if(pExpTable->dwNumNamePtrs && pExpTable->dwNamePtrRVA && pExpTable->dwOrdTableRVA) { DWORD* pNamePtr = NULL; WORD* pOrd = NULL; char* szName = NULL; g_pPELoader->getVAforRVA(VAL32(pExpTable->dwNamePtrRVA), (void **) &pNamePtr); g_pPELoader->getVAforRVA(VAL32(pExpTable->dwOrdTableRVA), (void **) &pOrd); #ifdef _DEBUG sprintf_s(szString,SZSTRING_SIZE,"// Export Names:"); printLine(GUICookie,szString); #endif for(j=0; j < VAL32(pExpTable->dwNumATEntries); j++,pNamePtr++,pOrd++) { g_pPELoader->getVAforRVA(VAL32(*pNamePtr), (void **) &szName); #ifdef _DEBUG sprintf_s(szString,SZSTRING_SIZE,"// [%d]: NamePtr=%X Ord=%X Name='%s'",j,VAL32(*pNamePtr),*pOrd,szName); printLine(GUICookie,szString); #endif (*g_prEATableRef)[VAL16(*pOrd)].pszName = szName; } } g_nEATableBase = pExpTable->dwOrdinalBase; break; } } #ifdef _DEBUG printLine(GUICookie,COMMENT((char*)-1)); // end multiline comment #endif } } } // helper to avoid mixing of SEH and stack objects with destructors void DumpEATEntriesWrapper(void* GUICookie, IMAGE_NT_HEADERS32 *pNTHeader32, IMAGE_OPTIONAL_HEADER32 *pOptHeader32, IMAGE_NT_HEADERS64 *pNTHeader64, IMAGE_OPTIONAL_HEADER64 *pOptHeader64) { PAL_CPP_TRY { DumpEATEntries(GUICookie, pNTHeader32, pOptHeader32, pNTHeader64, pOptHeader64); } PAL_CPP_CATCH_ALL { printError(GUICookie,"// ERROR READING EXPORT ADDRESS TABLE"); if (g_prEATableRef != NULL) { SDELETE(g_prEATableRef); } g_nEATableRef = 0; } PAL_CPP_ENDTRY } void DumpVtable(void* GUICookie) { // VTable : primary processing DWORD pVTable=0; VTableEntry* pVTE; DWORD i,j,k; char* szptr; IMAGE_NT_HEADERS32 *pNTHeader32 = NULL; IMAGE_OPTIONAL_HEADER32 *pOptHeader32 = NULL; IMAGE_NT_HEADERS64 *pNTHeader64 = NULL; IMAGE_OPTIONAL_HEADER64 *pOptHeader64 = NULL; if (g_pPELoader->IsPE32()) { pNTHeader32 = g_pPELoader->ntHeaders32(); pOptHeader32 
= &pNTHeader32->OptionalHeader; sprintf_s(szString,SZSTRING_SIZE,"%s%s 0x%08x", g_szAsmCodeIndent,KEYWORD(".imagebase"),VAL32(pOptHeader32->ImageBase)); printLine(GUICookie,szString); j = VAL16(pOptHeader32->Subsystem); sprintf_s(szString,SZSTRING_SIZE,"%s%s 0x%08x", g_szAsmCodeIndent,KEYWORD(".file alignment"),VAL32(pOptHeader32->FileAlignment)); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"%s%s 0x%08x", g_szAsmCodeIndent,KEYWORD(".stackreserve"),VAL32(pOptHeader32->SizeOfStackReserve)); printLine(GUICookie,szString); } else { pNTHeader64 = g_pPELoader->ntHeaders64(); pOptHeader64 = &pNTHeader64->OptionalHeader; sprintf_s(szString,SZSTRING_SIZE,"%s%s 0x%016I64x", g_szAsmCodeIndent,KEYWORD(".imagebase"),VAL64(pOptHeader64->ImageBase)); printLine(GUICookie,szString); j = VAL16(pOptHeader64->Subsystem); sprintf_s(szString,SZSTRING_SIZE,"%s%s 0x%08x", g_szAsmCodeIndent,KEYWORD(".file alignment"),VAL32(pOptHeader64->FileAlignment)); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"%s%s 0x%016I64x", g_szAsmCodeIndent,KEYWORD(".stackreserve"),VAL64(pOptHeader64->SizeOfStackReserve)); printLine(GUICookie,szString); } szptr = &szString[0]; szptr += sprintf_s(szString,SZSTRING_SIZE,"%s%s 0x%04x", g_szAsmCodeIndent,KEYWORD(".subsystem"),j); { const char* psz[15] = {"// UNKNOWN", "// NATIVE", "// WINDOWS_GUI", "// WINDOWS_CUI", "// <illegal value>", "// OS2_CUI", "// <illegal value>", "// POSIX_CUI", "// NATIVE_WINDOWS", "// WINDOWS_CE_GUI", "// EFI_APPLICATION", "// EFI_BOOT_SERVICE_DRIVER", "// EFI_RUNTIME_DRIVER", "// EFI_ROM", "// XBOX" }; if(j > 14) j = 4; // <illegal value> sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," %s",COMMENT(psz[j])); } printLine(GUICookie,szString); szptr = &szString[0]; i = (DWORD)VAL32(g_CORHeader->Flags); szptr += sprintf_s(szString,SZSTRING_SIZE,"%s%s 0x%08x", g_szAsmCodeIndent,KEYWORD(".corflags"),i); if(i != 0) { char sz[256], *szp = sz; szp += sprintf_s(szp,256," // "); if(i & COMIMAGE_FLAGS_ILONLY) szp += sprintf_s(szp,256-(szp-sz)," ILONLY"); if(COR_IS_32BIT_REQUIRED(i)) szp += sprintf_s(szp,256-(szp-sz)," 32BITREQUIRED"); if(COR_IS_32BIT_PREFERRED(i)) szp += sprintf_s(szp,256-(szp-sz)," 32BITPREFERRED"); if(i & COMIMAGE_FLAGS_IL_LIBRARY) szp += sprintf_s(szp,256-(szp-sz)," IL_LIBRARY"); if(i & COMIMAGE_FLAGS_TRACKDEBUGDATA) szp += sprintf_s(szp,256-(szp-sz)," TRACKDEBUGDATA"); szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT(sz)); } printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"%s// Image base: 0x%p",g_szAsmCodeIndent,g_pPELoader->base()); printLine(GUICookie,COMMENT(szString)); DumpEATEntriesWrapper(GUICookie, pNTHeader32, pOptHeader32, pNTHeader64, pOptHeader64); g_nVTableRef = 0; if(VAL32(g_CORHeader->VTableFixups.Size)) { IMAGE_SECTION_HEADER *pSecHdr = NULL; DWORD dwNumberOfSections; if (g_pPELoader->IsPE32()) { pSecHdr = IMAGE_FIRST_SECTION(g_pPELoader->ntHeaders32()); dwNumberOfSections = VAL16(g_pPELoader->ntHeaders32()->FileHeader.NumberOfSections); } else { pSecHdr = IMAGE_FIRST_SECTION(g_pPELoader->ntHeaders64()); dwNumberOfSections = VAL16(g_pPELoader->ntHeaders64()->FileHeader.NumberOfSections); } pVTable = VAL32(g_CORHeader->VTableFixups.VirtualAddress); for (i=0; i < dwNumberOfSections; i++,pSecHdr++) { if(((DWORD)pVTable >= VAL32(pSecHdr->VirtualAddress))&& ((DWORD)pVTable < VAL32(pSecHdr->VirtualAddress)+VAL32(pSecHdr->Misc.VirtualSize))) { pVTE = (VTableEntry*)( g_pPELoader->base() + VAL32(pSecHdr->PointerToRawData) + pVTable - VAL32(pSecHdr->VirtualAddress)); 
for(j=VAL32(g_CORHeader->VTableFixups.Size),k=0; j > 0; pVTE++, j-=sizeof(VTableEntry),k++) { szptr = &szString[0]; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s [%d] ",g_szAsmCodeIndent,KEYWORD(".vtfixup"),VAL16(pVTE->wCount)); DWORD dwSize = VAL16(pVTE->wCount) * 4; WORD wType = VAL16(pVTE->wType); if(wType & COR_VTABLE_32BIT) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("int32 ")); else if(wType & COR_VTABLE_64BIT) { szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("int64 ")); dwSize <<= 1; } if(wType & COR_VTABLE_FROM_UNMANAGED) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("fromunmanaged ")); if(wType & COR_VTABLE_CALL_MOST_DERIVED) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("callmostderived ")); if(wType & 0x8 /*COR_VTABLE_FROM_UNMANAGED_RETAIN_APPDOMAIN*/) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("retainappdomain ")); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("at ")); szptr = DumpDataPtr(szptr,VAL32(pVTE->dwAddr), dwSize); // Walk every v-table fixup entry and dump the slots. { BYTE *pSlot; if (g_pPELoader->getVAforRVA(VAL32(pVTE->dwAddr), (void **) &pSlot)) { char* szptr0 = szptr; szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," //"); for (WORD iSlot=0; iSlot<VAL16(pVTE->wCount); iSlot++) { mdMethodDef tkMethod = VAL32(*(DWORD *) pSlot); if (VAL16(pVTE->wType) & COR_VTABLE_32BIT) { szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," %08X", VAL32(*(DWORD *)pSlot)); pSlot += sizeof(DWORD); } else { szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," %016I64X", VAL64(*(unsigned __int64 *)pSlot)); pSlot += sizeof(unsigned __int64); } if (g_prVTableRef == NULL) { g_prVTableRef = new DynamicArray<VTableRef>; } (*g_prVTableRef)[g_nVTableRef].tkTok = tkMethod; (*g_prVTableRef)[g_nVTableRef].wEntry = (WORD)k; (*g_prVTableRef)[g_nVTableRef].wSlot = iSlot; g_nVTableRef++; //ValidateToken(tkMethod, mdtMethodDef); } sprintf_s(szptr0,SZSTRING_REMAINING_SIZE(szptr0),COMMENT(szptr0)); } else szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," %s",ERRORMSG(RstrUTF(IDS_E_BOGUSRVA))); } printLine(GUICookie,szString); } break; } } } } // MetaInfo integration: void DumpMI(_In_ __nullterminated const char *str) { static BOOL fInit = TRUE; static char* szStr = &szString[0]; static void* GUICookie; char* pch; // Reset if(str == (char*)-1) { fInit = TRUE; return; } // Init if(fInit) { strcpy_s(szString,5,"// "); fInit = FALSE; GUICookie = (void*)str; return; } // Normal work strcat_s(szString,SZSTRING_SIZE,str); if((pch = strchr(szStr,'\n'))) { *pch = 0; printLine(GUICookie,szStr); pch++; memcpy(&szString[3], pch, strlen(pch)+1); } } void DumpMetaInfo(_In_ __nullterminated const WCHAR* pwzFileName, _In_opt_z_ const char* pszObjFileName, void* GUICookie) { const WCHAR* pch = wcsrchr(pwzFileName,L'.'); DumpMI((char*)GUICookie); // initialize the print function for DumpMetaInfo if(pch && (!_wcsicmp(pch+1,W("lib")) || !_wcsicmp(pch+1,W("obj")))) { // This works only when all the rest does not // Init and run. 
if (SUCCEEDED(MetaDataGetDispenser(CLSID_CorMetaDataDispenser, IID_IMetaDataDispenserEx, (void **)&g_pDisp))) { WCHAR *pwzObjFileName=NULL; if (pszObjFileName) { int nLength = (int) strlen(pszObjFileName)+1; pwzObjFileName = new WCHAR[nLength]; memset(pwzObjFileName,0,sizeof(WCHAR)*nLength); WszMultiByteToWideChar(CP_UTF8,0,pszObjFileName,-1,pwzObjFileName,nLength); } DisplayFile((WCHAR*)pwzFileName, true, g_ulMetaInfoFilter, pwzObjFileName, DumpMI); g_pDisp->Release(); g_pDisp = NULL; if (pwzObjFileName) VDELETE(pwzObjFileName); } } else { HRESULT hr = S_OK; if(g_pDisp == NULL) { hr = MetaDataGetDispenser(CLSID_CorMetaDataDispenser, IID_IMetaDataDispenserEx, (void **)&g_pDisp); } if(SUCCEEDED(hr)) { g_ValModuleType = ValidatorModuleTypePE; if(g_pAssemblyImport==NULL) g_pAssemblyImport = GetAssemblyImport(NULL); printLine(GUICookie,RstrUTF(IDS_E_MISTART)); //MDInfo metaDataInfo(g_pPubImport, g_pAssemblyImport, (LPCWSTR)pwzFileName, DumpMI, g_ulMetaInfoFilter); MDInfo metaDataInfo(g_pDisp,(LPCWSTR)pwzFileName, DumpMI, g_ulMetaInfoFilter); metaDataInfo.DisplayMD(); printLine(GUICookie,RstrUTF(IDS_E_MIEND)); } } DumpMI((char*)-1); // reset the print function for DumpMetaInfo } void DumpPreamble() { printLine(g_pFile,""); if(g_fDumpHTML) { printLine(g_pFile, "<FONT SIZE=4><B>"); } else if(g_fDumpRTF) { } sprintf_s(szString,SZSTRING_SIZE,"// Microsoft (R) .NET IL Disassembler. Version " CLR_PRODUCT_VERSION); printLine(g_pFile,COMMENT(szString)); if(g_fDumpHTML) { printLine(g_pFile, "</B></FONT>"); } else if(g_fDumpRTF) { } printLine(g_pFile,""); if(g_fLimitedVisibility || (!g_fShowCA) || (!g_fDumpAsmCode) || (g_Mode & (MODE_DUMP_CLASS | MODE_DUMP_CLASS_METHOD | MODE_DUMP_CLASS_METHOD_SIG))) { printLine(g_pFile,""); printLine(g_pFile,COMMENT(RstrUTF(IDS_E_PARTDASM))); printLine(g_pFile,""); } if(g_fLimitedVisibility) { strcpy_s(szString, SZSTRING_SIZE, RstrUTF(IDS_E_ONLYITEMS)); if(!g_fHidePub) strcat_s(szString, SZSTRING_SIZE," Public"); if(!g_fHidePriv) strcat_s(szString, SZSTRING_SIZE," Private"); if(!g_fHideFam) strcat_s(szString, SZSTRING_SIZE," Family"); if(!g_fHideAsm) strcat_s(szString, SZSTRING_SIZE," Assembly"); if(!g_fHideFAA) strcat_s(szString, SZSTRING_SIZE," FamilyANDAssembly"); if(!g_fHidePrivScope) strcat_s(szString, SZSTRING_SIZE," PrivateScope"); printLine(g_pFile,COMMENT(szString)); } } void DumpSummary() { ULONG i; const char *pcClass,*pcNS,*pcMember, *pcSig; char szFQN[4096]; HENUMInternal hEnum; mdToken tkMember; CQuickBytes qbMemberSig; PCCOR_SIGNATURE pComSig; ULONG cComSig; DWORD dwAttrs; mdToken tkEventType; printLine(g_pFile,"//============ S U M M A R Y ================================="); if (SUCCEEDED(g_pImport->EnumGlobalFunctionsInit(&hEnum))) { while(g_pImport->EnumNext(&hEnum, &tkMember)) { if (FAILED(g_pImport->GetNameOfMethodDef(tkMember, &pcMember)) || FAILED(g_pImport->GetSigOfMethodDef(tkMember, &cComSig, &pComSig))) { sprintf_s(szString, SZSTRING_SIZE, "// ERROR in the method record %08X", tkMember); printLine(g_pFile, szString); continue; } qbMemberSig.Shrink(0); pcSig = cComSig ? 
PrettyPrintSig(pComSig, cComSig, "", &qbMemberSig, g_pImport,NULL) : "NO SIGNATURE"; PREFIX_ASSUME(ProperName((char*)pcMember) != 0); sprintf_s(szString,SZSTRING_SIZE,"// %08X [GLM] %s : %s", tkMember,ProperName((char*)pcMember),pcSig); printLine(g_pFile,szString); } } g_pImport->EnumClose(&hEnum); if (SUCCEEDED(g_pImport->EnumGlobalFieldsInit(&hEnum))) { while(g_pImport->EnumNext(&hEnum, &tkMember)) { if (FAILED(g_pImport->GetNameOfFieldDef(tkMember, &pcMember)) || FAILED(g_pImport->GetSigOfFieldDef(tkMember, &cComSig, &pComSig))) { sprintf_s(szString, SZSTRING_SIZE, "// ERROR in the field record %08X", tkMember); printLine(g_pFile, szString); continue; } qbMemberSig.Shrink(0); pcSig = cComSig ? PrettyPrintSig(pComSig, cComSig, "", &qbMemberSig, g_pImport,NULL) : "NO SIGNATURE"; PREFIX_ASSUME(ProperName((char*)pcMember) != 0); sprintf_s(szString,SZSTRING_SIZE,"// %08X [GLF] %s : %s", tkMember,ProperName((char*)pcMember),pcSig); printLine(g_pFile,szString); } } g_pImport->EnumClose(&hEnum); for (i = 0; i < g_NumClasses; i++) { if (FAILED(g_pImport->GetNameOfTypeDef(g_cl_list[i], &pcClass, &pcNS))) { sprintf_s(szString, SZSTRING_SIZE, "// ERROR in the TypeDef record %08X", g_cl_list[i]); printLine(g_pFile, szString); continue; } PREFIX_ASSUME(ProperName((char*)pcClass) != 0); if(*pcNS) sprintf_s(szFQN,4096,"%s.%s", ProperName((char*)pcNS),ProperName((char*)pcClass)); else strcpy_s(szFQN,4096,ProperName((char*)pcClass)); sprintf_s(szString,SZSTRING_SIZE,"// %08X [CLS] %s", g_cl_list[i],szFQN); printLine(g_pFile,szString); if(SUCCEEDED(g_pImport->EnumInit(mdtMethodDef, g_cl_list[i], &hEnum))) { while(g_pImport->EnumNext(&hEnum, &tkMember)) { if (FAILED(g_pImport->GetNameOfMethodDef(tkMember, &pcMember)) || FAILED(g_pImport->GetSigOfMethodDef(tkMember, &cComSig, &pComSig))) { sprintf_s(szString, SZSTRING_SIZE, "// ERROR in the method record %08X", tkMember); printLine(g_pFile, szString); continue; } qbMemberSig.Shrink(0); pcSig = cComSig ? PrettyPrintSig(pComSig, cComSig, "", &qbMemberSig, g_pImport,NULL) : "NO SIGNATURE"; PREFIX_ASSUME(ProperName((char*)pcMember) != 0); sprintf_s(szString,SZSTRING_SIZE,"// %08X [MET] %s::%s : %s", tkMember,szFQN,ProperName((char*)pcMember),pcSig); printLine(g_pFile,szString); } } g_pImport->EnumClose(&hEnum); if(SUCCEEDED(g_pImport->EnumInit(mdtFieldDef, g_cl_list[i], &hEnum))) { while(g_pImport->EnumNext(&hEnum, &tkMember)) { if (FAILED(g_pImport->GetNameOfFieldDef(tkMember, &pcMember)) || FAILED(g_pImport->GetSigOfFieldDef(tkMember, &cComSig, &pComSig))) { sprintf_s(szString, SZSTRING_SIZE, "// ERROR in the field record %08X", tkMember); printLine(g_pFile, szString); continue; } qbMemberSig.Shrink(0); pcSig = cComSig ? 
PrettyPrintSig(pComSig, cComSig, "", &qbMemberSig, g_pImport,NULL) : "NO SIGNATURE"; PREFIX_ASSUME(ProperName((char*)pcMember) != 0); sprintf_s(szString,SZSTRING_SIZE,"// %08X [FLD] %s::%s : %s", tkMember,szFQN,ProperName((char*)pcMember),pcSig); printLine(g_pFile,szString); } } g_pImport->EnumClose(&hEnum); if(SUCCEEDED(g_pImport->EnumInit(mdtEvent, g_cl_list[i], &hEnum))) { while(g_pImport->EnumNext(&hEnum, &tkMember)) { if (FAILED(g_pImport->GetEventProps(tkMember,&pcMember,&dwAttrs,&tkEventType))) { sprintf_s(szString, SZSTRING_SIZE, "// Invalid Event %08X record", tkMember); printLine(g_pFile, szString); continue; } qbMemberSig.Shrink(0); pcSig = "NO TYPE"; if(RidFromToken(tkEventType)) { switch(TypeFromToken(tkEventType)) { case mdtTypeRef: case mdtTypeDef: case mdtTypeSpec: pcSig = PrettyPrintClass(&qbMemberSig,tkEventType,g_pImport); break; default: break; } } PREFIX_ASSUME(ProperName((char*)pcMember) != 0); sprintf_s(szString,SZSTRING_SIZE,"// %08X [EVT] %s::%s : %s", tkMember,szFQN,ProperName((char*)pcMember),pcSig); printLine(g_pFile,szString); } } g_pImport->EnumClose(&hEnum); if(SUCCEEDED(g_pImport->EnumInit(mdtProperty, g_cl_list[i], &hEnum))) { while(g_pImport->EnumNext(&hEnum, &tkMember)) { if (FAILED(g_pImport->GetPropertyProps(tkMember,&pcMember,&dwAttrs,&pComSig,&cComSig))) { sprintf_s(szString, SZSTRING_SIZE, "// Invalid Property %08X record", tkMember); printLine(g_pFile, szString); continue; } qbMemberSig.Shrink(0); pcSig = cComSig ? PrettyPrintSig(pComSig, cComSig, "", &qbMemberSig, g_pImport,NULL) : "NO SIGNATURE"; PREFIX_ASSUME(ProperName((char*)pcMember) != 0); sprintf_s(szString,SZSTRING_SIZE,"// %08X [PRO] %s::%s : %s", tkMember,szFQN,ProperName((char*)pcMember),pcSig); printLine(g_pFile,szString); } } g_pImport->EnumClose(&hEnum); } printLine(g_pFile,"//=============== END SUMMARY =================================="); } void DumpRTFPrefix(void* GUICookie,BOOL fFontDefault) { g_fDumpRTF = FALSE; printLine(GUICookie,"{\\rtf1\\ansi"); if(fFontDefault) printLine(GUICookie,"{\\fonttbl{\\f0\\fmodern\\fprq1\\fcharset1 Courier New;}{\\f1\\fswiss\\fcharset1 Arial;}}"); printLine(GUICookie,"{\\colortbl ;\\red0\\green0\\blue128;\\red0\\green128\\blue0;\\red255\\green0\\blue0;}"); printLine(GUICookie,"\\viewkind4\\uc1\\pard\\f0\\fs20"); g_fDumpRTF = TRUE; } void DumpRTFPostfix(void* GUICookie) { g_fDumpRTF = FALSE; printLine(GUICookie,"}"); g_fDumpRTF = TRUE; } mdToken ClassOf(mdToken tok) { mdToken retval=0; switch(TypeFromToken(tok)) { case mdtTypeDef: case mdtTypeRef: case mdtTypeSpec: retval = tok; break; case mdtFieldDef: case mdtMethodDef: case mdtMemberRef: if (FAILED(g_pImport->GetParentToken(tok, &retval))) { retval = mdTokenNil; } else { retval = ClassOf(retval); } break; default: break; } return retval; } void DumpRefs(BOOL fClassesOnly) { CQuickBytes out; DynamicArray<TokPair> *refs = g_refs; TokPair *newrefs = NULL; mdToken tkThisUser,tkThisRef; mdToken tkLastUser = 0xFFFFFFFF, tkLastRef=0xFFFFFFFF; DWORD i=0,j=0; g_refs = NULL; printLine(g_pFile,COMMENT((char*)0)); printLine(g_pFile,"//============ R E F E R E N C E S ==========================="); strcpy_s(g_szAsmCodeIndent,MAX_MEMBER_LENGTH,"// "); if(fClassesOnly && g_NumRefs) { if((newrefs = new TokPair[g_NumRefs])) { for(i=0; i<g_NumRefs; i++) { newrefs[i].tkUser = tkThisUser = ClassOf((*refs)[i].tkUser); newrefs[i].tkRef = tkThisRef = ClassOf((*refs)[i].tkRef); if(!tkThisUser) continue; if(!tkThisRef) continue; if(tkThisUser == tkThisRef) continue; for(j = 0; j<i; j++) { 
if((newrefs[j].tkUser==tkThisUser)&&(newrefs[j].tkRef==tkThisRef)) { newrefs[i].tkRef = 0; break; } } } } else fClassesOnly = FALSE; } for(i = 0; i <g_NumRefs; i++) { if(fClassesOnly) { tkThisUser = newrefs[i].tkUser; tkThisRef = newrefs[i].tkRef; } else { tkThisUser = (*refs)[i].tkUser; tkThisRef = (*refs)[i].tkRef; } if(!tkThisUser) continue; if(!tkThisRef) continue; if(tkThisUser == tkThisRef) continue; if((tkThisUser==tkLastUser)&&(tkThisRef==tkLastRef)) continue; strcpy_s(szString, SZSTRING_SIZE,g_szAsmCodeIndent); if(tkThisUser != tkLastUser) { PrettyPrintToken(szString, tkThisUser, g_pImport,g_pFile,0); //TypeDef,TypeRef,TypeSpec,MethodDef,FieldDef,MemberRef,MethodSpec,String strcat_s(szString, SZSTRING_SIZE, " references "); printLine(g_pFile,szString); tkLastUser = tkThisUser; } strcpy_s(szString, SZSTRING_SIZE,g_szAsmCodeIndent); strcat_s(szString, SZSTRING_SIZE," - "); PrettyPrintToken(szString, tkThisRef, g_pImport,g_pFile,0); //TypeDef,TypeRef,TypeSpec,MethodDef,FieldDef,MemberRef,MethodSpec,String printLine(g_pFile,szString); tkLastRef = tkThisRef; } printLine(g_pFile,"//=============== END REFERENCES ============================="); printLine(g_pFile,COMMENT((char*)-1)); g_refs = refs; if(newrefs) VDELETE(newrefs); } void CloseNamespace(__inout __nullterminated char* szString) { if(strlen(g_szNamespace)) { char* szptr = &szString[0]; if(g_szAsmCodeIndent[0]) g_szAsmCodeIndent[strlen(g_szAsmCodeIndent)-2] = 0; szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s%s ",g_szAsmCodeIndent, UNSCOPE()); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("// end of namespace %s"),ProperName(g_szNamespace)); printLine(g_pFile,szString); printLine(g_pFile,""); g_szNamespace[0] = 0; } } FILE* OpenOutput(_In_ __nullterminated const WCHAR* wzFileName) { FILE* pfile = NULL; if(g_uCodePage == 0xFFFFFFFF) _wfopen_s(&pfile,wzFileName,W("wb")); else _wfopen_s(&pfile,wzFileName,W("wt")); if(pfile) { if(g_uCodePage == CP_UTF8) fwrite("\357\273\277",3,1,pfile); else if(g_uCodePage == 0xFFFFFFFF) fwrite("\377\376",2,1,pfile); } return pfile; } FILE* OpenOutput(_In_ __nullterminated const char* szFileName) { return OpenOutput(UtfToUnicode(szFileName)); } // // Init PELoader, dump file header info // #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable:21000) // Suppress PREFast warning about overly large function #endif BOOL DumpFile() { BOOL fSuccess = FALSE; static WCHAR wzInputFileName[MAX_FILENAME_LENGTH]; static char szFilenameANSI[MAX_FILENAME_LENGTH*3]; IMetaDataDispenser *pMetaDataDispenser = NULL; const char *pszFilename = g_szInputFile; const DWORD openFlags = ofRead | (g_fProject ? 
0 : ofNoTransform); { if(g_fDumpHTML) { printLine(g_pFile, "<HTML>"); printLine(g_pFile, "<HEAD>"); sprintf_s(szString,SZSTRING_SIZE,"<TITLE> %s - IL DASM</TITLE>",g_szInputFile); printLine(g_pFile, szString); printLine(g_pFile, "</HEAD>"); printLine(g_pFile, "<BODY>"); printLine(g_pFile, "<FONT SIZE=3 FACE=\"Arial\">"); printLine(g_pFile, "<PRE>"); } else if(g_fDumpRTF) { DumpRTFPrefix(g_pFile,TRUE); } DumpPreamble(); } { char* pch = strrchr(g_szInputFile,'.'); if(pch && (!_stricmp(pch+1,"lib") || !_stricmp(pch+1,"obj"))) { DumpMetaInfo(g_wszFullInputFile,g_pszObjFileName,g_pFile); return FALSE; } } if(g_pPELoader) goto DoneInitialization; // skip initialization, it's already done g_pPELoader = new PELoader(); if (g_pPELoader == NULL) { printError(g_pFile,RstrUTF(IDS_E_INITLDR)); goto exit; } memset(wzInputFileName,0,sizeof(WCHAR)*MAX_FILENAME_LENGTH); WszMultiByteToWideChar(CP_UTF8,0,pszFilename,-1,wzInputFileName,MAX_FILENAME_LENGTH); memset(szFilenameANSI,0,MAX_FILENAME_LENGTH*3); WszWideCharToMultiByte(g_uConsoleCP,0,wzInputFileName,-1,szFilenameANSI,MAX_FILENAME_LENGTH*3,NULL,NULL); fSuccess = g_pPELoader->open(wzInputFileName); if (fSuccess == FALSE) { sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_FILEOPEN), pszFilename); printError(g_pFile,szString); SDELETE(g_pPELoader); g_pPELoader = NULL; goto exit; } fSuccess = FALSE; if (g_pPELoader->getCOMHeader(&g_CORHeader) == FALSE) { sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_NOCORHDR), pszFilename); printError(g_pFile,szString); if (g_fDumpHeader) DumpHeader(g_CORHeader,g_pFile); goto exit; } if (VAL16(g_CORHeader->MajorRuntimeVersion) == 1 || VAL16(g_CORHeader->MajorRuntimeVersion) > COR_VERSION_MAJOR) { sprintf_s(szString,SZSTRING_SIZE,"CORHeader->MajorRuntimeVersion = %d",VAL16(g_CORHeader->MajorRuntimeVersion)); printError(g_pFile,szString); printError(g_pFile,RstrUTF(IDS_E_BADCORHDR)); goto exit; } g_tkEntryPoint = VAL32(IMAGE_COR20_HEADER_FIELD(*g_CORHeader, EntryPointToken)); // integration with MetaInfo { if (g_pPELoader->getVAforRVA(VAL32(g_CORHeader->MetaData.VirtualAddress),&g_pMetaData) == FALSE) { printError(g_pFile, RstrUTF(IDS_E_OPENMD)); if (g_fDumpHeader) DumpHeader(g_CORHeader, g_pFile); goto exit; } g_cbMetaData = VAL32(g_CORHeader->MetaData.Size); } if (FAILED(GetMetaDataInternalInterface( (BYTE *)g_pMetaData, g_cbMetaData, openFlags, IID_IMDInternalImport, (LPVOID *)&g_pImport))) { if (g_fDumpHeader) DumpHeader(g_CORHeader, g_pFile); printError(g_pFile, RstrUTF(IDS_E_OPENMD)); goto exit; } TokenSigInit(g_pImport); if (FAILED(MetaDataGetDispenser(CLSID_CorMetaDataDispenser, IID_IMetaDataDispenser, (LPVOID*)&pMetaDataDispenser))) { if (g_fDumpHeader) DumpHeader(g_CORHeader, g_pFile); printError(g_pFile, RstrUTF(IDS_E_OPENMD)); goto exit; } if (FAILED(pMetaDataDispenser->OpenScopeOnMemory(g_pMetaData, g_cbMetaData, openFlags, IID_IMetaDataImport2, (LPUNKNOWN *)&g_pPubImport ))) { if (g_fDumpHeader) DumpHeader(g_CORHeader, g_pFile); printError(g_pFile, RstrUTF(IDS_E_OPENMD)); goto exit; } if((g_uNCA = g_pImport->GetCountWithTokenKind(mdtCustomAttribute))) { g_rchCA = new char[g_uNCA+1]; _ASSERTE(g_rchCA); } EnumClasses(); EnumTypedefs(); DoneInitialization: if(g_uNCA) { _ASSERTE(g_rchCA); memset(g_rchCA,0,g_uNCA+1); } { // Dump the CLR header info if requested. 
printLine(g_pFile,COMMENT((char*)0)); // start multiline comment if (g_fDumpHeader) { DumpHeader(g_CORHeader,g_pFile); DumpHeaderDetails(g_CORHeader,g_pFile); } else DumpVTables(g_CORHeader,g_pFile); if (g_fDumpStats) DumpStatistics(g_CORHeader,g_pFile); if(g_fDumpClassList) PrintClassList(); // MetaInfo integration: if(g_fDumpMetaInfo) DumpMetaInfo(g_wszFullInputFile,NULL,g_pFile); if(g_fDumpSummary) DumpSummary(); printLine(g_pFile,COMMENT((char*)-1)); // end multiline comment if(g_fShowRefs) g_refs = new DynamicArray<TokPair>; if (g_fDumpAsmCode) { g_szNamespace[0] = 0; if(g_tkClassToDump) //g_tkClassToDump is set in EnumClasses { DumpClass(TopEncloser(g_tkClassToDump), VAL32(IMAGE_COR20_HEADER_FIELD(*g_CORHeader, EntryPointToken)),g_pFile,7); //7-dump everything at once CloseNamespace(szString); goto ReportAndExit; } { HENUMInternal hEnumMethod; ULONG ulNumGlobalFunc=0; if (SUCCEEDED(g_pImport->EnumGlobalFunctionsInit(&hEnumMethod))) { ulNumGlobalFunc = g_pImport->EnumGetCount(&hEnumMethod); g_pImport->EnumClose(&hEnumMethod); } } //DumpVtable(g_pFile); DumpMscorlib(g_pFile); if(g_fDumpTypeList) DumpTypelist(g_pFile); DumpManifest(g_pFile); DumpTypedefs(g_pFile); /* First dump the classes w/o members*/ if(g_fForwardDecl && g_NumClasses) { printLine(g_pFile,COMMENT("//")); printLine(g_pFile,COMMENT("// ============== CLASS STRUCTURE DECLARATION ==================")); printLine(g_pFile,COMMENT("//")); for (DWORD i = 0; i < g_NumClasses; i++) { if(g_cl_enclosing[i] == mdTypeDefNil) // nested classes are dumped within enclosing ones { DumpClass(g_cl_list[i], VAL32(IMAGE_COR20_HEADER_FIELD(*g_CORHeader, EntryPointToken)),g_pFile,2); // 2=header+nested classes } } CloseNamespace(szString); printLine(g_pFile,""); printLine(g_pFile,COMMENT("// =============================================================")); printLine(g_pFile,""); } /* Second, dump the global fields and methods */ DumpGlobalFields(); DumpGlobalMethods(VAL32(IMAGE_COR20_HEADER_FIELD(*g_CORHeader, EntryPointToken))); /* Third, dump the classes with members */ if(g_NumClasses) { printLine(g_pFile,""); printLine(g_pFile,COMMENT("// =============== CLASS MEMBERS DECLARATION ===================")); if(g_fForwardDecl) { printLine(g_pFile,COMMENT("// note that class flags, 'extends' and 'implements' clauses")); printLine(g_pFile,COMMENT("// are provided here for information only")); } printLine(g_pFile,""); for (DWORD i = 0; i < g_NumClasses; i++) { if(g_cl_enclosing[i] == mdTypeDefNil) // nested classes are dumped within enclosing ones { DumpClass(g_cl_list[i], VAL32(IMAGE_COR20_HEADER_FIELD(*g_CORHeader, EntryPointToken)),g_pFile,7); //7=everything } } CloseNamespace(szString); printLine(g_pFile,""); printLine(g_pFile,COMMENT("// =============================================================")); printLine(g_pFile,""); } if(g_fShowCA) { if(g_uNCA) _ASSERTE(g_rchCA); for(DWORD i=1; i<= g_uNCA; i++) { if(g_rchCA[i] == 0) DumpCustomAttribute(TokenFromRid(i,mdtCustomAttribute),g_pFile,true); } } // If there were "ldptr", dump the .rdata section with labels if(g_iPtrCount) { //first, sort the pointers int i,j; bool swapped; do { swapped = FALSE; for(i = 1; i < g_iPtrCount; i++) { if((*g_pPtrTags)[i-1] > (*g_pPtrTags)[i]) { j = (*g_pPtrTags)[i-1]; (*g_pPtrTags)[i-1] = (*g_pPtrTags)[i]; (*g_pPtrTags)[i] = j; j = (*g_pPtrSize)[i-1]; (*g_pPtrSize)[i-1] = (*g_pPtrSize)[i]; (*g_pPtrSize)[i] = j; swapped = TRUE; } } } while(swapped); //second, dump data for each ptr as binarray IMAGE_SECTION_HEADER *pSecHdr = NULL; if(g_pPELoader->IsPE32()) 
pSecHdr = IMAGE_FIRST_SECTION(g_pPELoader->ntHeaders32()); else pSecHdr = IMAGE_FIRST_SECTION(g_pPELoader->ntHeaders64()); DWORD dwNumberOfSections; if(g_pPELoader->IsPE32()) dwNumberOfSections = VAL16(g_pPELoader->ntHeaders32()->FileHeader.NumberOfSections); else dwNumberOfSections = VAL16(g_pPELoader->ntHeaders64()->FileHeader.NumberOfSections); DWORD fromPtr,toPtr,limPtr; char* szptr; for(j = 0; j < g_iPtrCount; j++) { BYTE *pb; fromPtr = (*g_pPtrTags)[j]; for (i=0; i < (int)dwNumberOfSections; i++,pSecHdr++) { if((fromPtr >= VAL32(pSecHdr->VirtualAddress))&& (fromPtr < VAL32(pSecHdr->VirtualAddress)+VAL32(pSecHdr->Misc.VirtualSize))) break; } if(i == (int)dwNumberOfSections) { sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_ROGUEPTR), fromPtr); printLine(g_pFile,szString); break; } // OK, now we have the section; what about end of BLOB? const char* szTls = "D_"; if(strcmp((char*)(pSecHdr->Name),".tls")==0) szTls = "T_"; else if(strcmp((char*)(pSecHdr->Name),".text")==0) szTls = "I_"; if(j == g_iPtrCount-1) { toPtr = VAL32(pSecHdr->VirtualAddress)+VAL32(pSecHdr->Misc.VirtualSize); } else { toPtr = (*g_pPtrTags)[j+1]; if(toPtr > VAL32(pSecHdr->VirtualAddress)+VAL32(pSecHdr->Misc.VirtualSize)) { toPtr = VAL32(pSecHdr->VirtualAddress)+VAL32(pSecHdr->Misc.VirtualSize); } } if(toPtr - fromPtr > (*g_pPtrSize)[j]) toPtr = fromPtr + (*g_pPtrSize)[j]; limPtr = toPtr; // at limPtr and after, pad with 0 if(limPtr > VAL32(pSecHdr->VirtualAddress)+VAL32(pSecHdr->SizeOfRawData)) limPtr = VAL32(pSecHdr->VirtualAddress)+VAL32(pSecHdr->SizeOfRawData); PrintBlob: szptr = szString; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,KEYWORD(".data")); if(*szTls=='T') szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("tls ")); else if(*szTls=='I') szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("cil ")); if(fromPtr >= limPtr) { // uninitialized data sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s%8.8X = %s[%d]",szTls,fromPtr,KEYWORD("int8"),toPtr-fromPtr); printLine(g_pFile,szString); } else { // initialized data szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s%8.8X = %s (",szTls,fromPtr,KEYWORD("bytearray")); printLine(g_pFile,szString); szptr = szString; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s ",g_szAsmCodeIndent); pb = g_pPELoader->base() + VAL32(pSecHdr->PointerToRawData) + fromPtr - VAL32(pSecHdr->VirtualAddress); // now fromPtr is the beginning of the BLOB, and toPtr is [exclusive] end of it DumpHexbytes(szptr, pb, fromPtr, toPtr, limPtr); } // to preserve alignment, dump filler if any if(limPtr == toPtr) // don't need filler if it's the last item in section { if((j < g_iPtrCount-1)&&(toPtr < (DWORD)((*g_pPtrTags)[j+1]))) { DWORD align; DWORD stptr = (DWORD)(*g_pPtrTags)[j+1]; for(align = 1; (align & stptr)==0; align = align << 1); align -= 1; if(toPtr & align) { fromPtr = toPtr; toPtr = (toPtr + align)&~align; goto PrintBlob; } } } } } ReportAndExit: printLine(g_pFile,COMMENT(RstrUTF(IDS_E_DASMOK))); fSuccess = TRUE; } fSuccess = TRUE; #ifndef TARGET_UNIX if(g_pFile) // dump .RES file (if any), if not to console { WCHAR wzResFileName[2048], *pwc; memset(wzResFileName,0,sizeof(wzResFileName)); WszMultiByteToWideChar(CP_UTF8,0,g_szOutputFile,-1,wzResFileName,2048); pwc = wcsrchr(wzResFileName,L'.'); if(pwc == NULL) pwc = &wzResFileName[wcslen(wzResFileName)]; wcscpy_s(pwc, 2048 - (pwc - wzResFileName), L".res"); DWORD ret = DumpResourceToFile(wzResFileName); switch(ret) { case 0: szString[0] = 0; break; case 1: 
sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_W_CREATEDW32RES)/*"// WARNING: Created Win32 resource file %ls"*/, UnicodeToUtf(wzResFileName)); break; case 0xDFFFFFFF: sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_CORRUPTW32RES)/*"// ERROR: Corrupt Win32 resources"*/); break; case 0xEFFFFFFF: sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_CANTOPENW32RES)/*"// ERROR: Unable to open file %ls"*/, UnicodeToUtf(wzResFileName)); break; case 0xFFFFFFFF: sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_CANTACCESSW32RES)/*"// ERROR: Unable to access Win32 resources"*/); break; } if(szString[0]) { if(ret == 1) printLine(g_pFile,COMMENT(szString)); else printError(g_pFile,szString); } } #endif if(g_fShowRefs) DumpRefs(TRUE); if(g_fDumpHTML) { printLine(g_pFile, "</PRE>"); printLine(g_pFile, "</BODY>"); printLine(g_pFile, "</HTML>"); } else if(g_fDumpRTF) { DumpRTFPostfix(g_pFile); } if(g_pFile) { fclose(g_pFile); g_pFile = NULL; } } exit: if (pMetaDataDispenser) pMetaDataDispenser->Release(); return fSuccess; } #ifdef _PREFAST_ #pragma warning(pop) #endif #ifdef _MSC_VER #pragma warning(default : 4640) #endif
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "ildasmpch.h" #include <crtdbg.h> #include <utilcode.h> #include "specstrings.h" #include "debugmacros.h" #include "corpriv.h" #include "ceeload.h" #include "dynamicarray.h" #include <metamodelpub.h> #include "formattype.h" #define DECLARE_DATA #include "dasmenum.hpp" #include "dis.h" #include "resource.h" #include "dasm_sz.h" //#define MAX_FILENAME_LENGTH 2048 //moved to dis.h #include <corsym.h> #include <clrversion.h> // Disable the "initialization of static local vars is no thread safe" error #ifdef _MSC_VER #pragma warning(disable : 4640) #endif #ifdef TARGET_UNIX #include "resourcestring.h" #define NATIVE_STRING_RESOURCE_NAME dasm_rc DECLARE_NATIVE_STRING_RESOURCE_TABLE(NATIVE_STRING_RESOURCE_NAME); #endif #include "mdfileformat.h" struct MIDescriptor { mdToken tkClass; // defining class token mdToken tkDecl; // implemented method token mdToken tkBody; // implementing method token mdToken tkBodyParent; // parent of the implementing method }; ISymUnmanagedReader* g_pSymReader = NULL; IMDInternalImport* g_pImport = NULL; IMetaDataImport2* g_pPubImport; extern IMetaDataAssemblyImport* g_pAssemblyImport; PELoader * g_pPELoader; void * g_pMetaData; unsigned g_cbMetaData; IMAGE_COR20_HEADER * g_CORHeader; DynamicArray<__int32> *g_pPtrTags = NULL; //to keep track of all "ldptr" DynamicArray<DWORD> *g_pPtrSize= NULL; //to keep track of all "ldptr" int g_iPtrCount = 0; mdToken * g_cl_list = NULL; mdToken * g_cl_enclosing = NULL; BYTE* g_enum_td_type = NULL; // enum (TD) underlying types BYTE* g_enum_tr_type = NULL; // enum (TR) underlying types IMDInternalImport** g_asmref_import = NULL; // IMDInternalImports for external assemblies DynamicArray<MIDescriptor> *g_pmi_list = NULL; DWORD g_NumMI; DWORD g_NumClasses; DWORD g_NumTypeRefs; DWORD g_NumAsmRefs; DWORD g_NumModules; BOOL g_fDumpIL = TRUE; BOOL g_fDumpHeader = FALSE; BOOL g_fDumpAsmCode = TRUE; extern BOOL g_fDumpTokens; // declared in formatType.cpp BOOL g_fDumpStats = FALSE; BOOL g_fTDC = TRUE; BOOL g_fShowCA = TRUE; BOOL g_fCAVerbal = FALSE; BOOL g_fShowRefs = FALSE; BOOL g_fDumpToPerfWriter = FALSE; HANDLE g_PerfDataFilePtr = NULL; BOOL g_fDumpClassList = FALSE; BOOL g_fDumpTypeList = FALSE; BOOL g_fDumpSummary = FALSE; BOOL g_fDecompile = FALSE; // still in progress BOOL g_fShowBytes = FALSE; BOOL g_fShowSource = FALSE; BOOL g_fPrettyPrint = FALSE; BOOL g_fInsertSourceLines = FALSE; BOOL g_fThisIsInstanceMethod; BOOL g_fTryInCode = TRUE; BOOL g_fLimitedVisibility = FALSE; BOOL g_fHidePub = TRUE; BOOL g_fHidePriv = TRUE; BOOL g_fHideFam = TRUE; BOOL g_fHideAsm = TRUE; BOOL g_fHideFAA = TRUE; BOOL g_fHideFOA = TRUE; BOOL g_fHidePrivScope = TRUE; BOOL g_fProject = FALSE; // if .winmd file, transform to .NET view extern BOOL g_fQuoteAllNames; // declared in formatType.cpp, init to FALSE BOOL g_fForwardDecl=FALSE; char g_szAsmCodeIndent[MAX_MEMBER_LENGTH]; char g_szNamespace[MAX_MEMBER_LENGTH]; DWORD g_Mode = MODE_DUMP_ALL; char g_pszClassToDump[MAX_CLASSNAME_LENGTH]; char g_pszMethodToDump[MAX_MEMBER_LENGTH]; char g_pszSigToDump[MAX_SIGNATURE_LENGTH]; BOOL g_fCustomInstructionEncodingSystem = FALSE; COR_FIELD_OFFSET *g_rFieldOffset = NULL; ULONG g_cFieldsMax, g_cFieldOffsets; char* g_pszExeFile; char g_szInputFile[MAX_FILENAME_LENGTH]; // in UTF-8 WCHAR g_wszFullInputFile[MAX_PATH + 1]; // in UTF-16 char g_szOutputFile[MAX_FILENAME_LENGTH]; // in UTF-8 char* g_pszObjFileName; FILE* g_pFile = 
NULL; mdToken g_tkClassToDump = 0; mdToken g_tkMethodToDump = 0; unsigned g_uConsoleCP = CP_ACP; unsigned g_uCodePage = g_uConsoleCP; char* g_rchCA = NULL; // dyn.allocated array of CA dumped/not flags unsigned g_uNCA = 0; // num. of CAs struct ResourceNode; extern DynamicArray<LocalComTypeDescr*> *g_pLocalComType; extern ULONG g_LocalComTypeNum; // MetaInfo integration: #include "../tools/metainfo/mdinfo.h" BOOL g_fDumpMetaInfo = FALSE; ULONG g_ulMetaInfoFilter = MDInfo::dumpDefault; // Validator module type. DWORD g_ValModuleType = ValidatorModuleTypeInvalid; IMetaDataDispenserEx *g_pDisp = NULL; void DisplayFile(_In_ __nullterminated WCHAR* szFile, BOOL isFile, ULONG DumpFilter, _In_opt_z_ WCHAR* szObjFile, strPassBackFn pDisplayString); extern mdMethodDef g_tkEntryPoint; // integration with MetaInfo DWORD DumpResourceToFile(_In_ __nullterminated WCHAR* wzFileName); // see DRES.CPP struct VTableRef { mdMethodDef tkTok; WORD wEntry; WORD wSlot; }; DynamicArray<VTableRef> *g_prVTableRef = NULL; ULONG g_nVTableRef = 0; struct EATableRef { mdMethodDef tkTok; char* pszName; }; DynamicArray<EATableRef> *g_prEATableRef=NULL; ULONG g_nEATableRef = 0; ULONG g_nEATableBase = 0; extern HINSTANCE g_hResources; void DumpCustomAttributeProps(mdToken tkCA, mdToken tkType, mdToken tkOwner, BYTE*pBlob, ULONG ulLen, void *GUICookie, bool bWithOwner); WCHAR* RstrW(unsigned id) { static WCHAR buffer[1024]; DWORD cchBuff = (DWORD)ARRAY_SIZE(buffer); WCHAR* buff = (WCHAR*)buffer; memset(buffer,0,sizeof(buffer)); switch(id) { case IDS_E_DASMOK: case IDS_E_PARTDASM: case IDS_E_PARAMSEQNO: case IDS_E_MEMBRENUM: case IDS_E_ODDMEMBER: case IDS_E_ENUMINIT: case IDS_E_NODATA: case IDS_E_VTFUTABLE: case IDS_E_BOGUSRVA: case IDS_E_EATJTABLE: case IDS_E_EATJSIZE: case IDS_E_RESFLAGS: case IDS_E_MIHENTRY: case IDS_E_CODEMGRTBL: case IDS_E_COMIMAGE: case IDS_E_MDDETAILS: case IDS_E_MISTART: case IDS_E_MIEND: case IDS_E_ONLYITEMS: case IDS_E_DECOMPRESS: case IDS_E_COMPRESSED: case IDS_E_INSTRDECOD: case IDS_E_INSTRTYPE: case IDS_E_SECTHEADER: case IDS_E_MDAIMPORT: case IDS_E_MDAFROMMDI: case IDS_E_MDIIMPORT: case IDS_E_NOMANIFEST: case IDS_W_CREATEDW32RES: case IDS_E_CORRUPTW32RES: case IDS_E_CANTACCESSW32RES: case IDS_E_CANTOPENW32RES: case IDS_ERRORREOPENINGFILE: wcscpy_s(buffer,ARRAY_SIZE(buffer),W("// ")); buff +=3; cchBuff -= 3; break; case IDS_E_AUTOCA: case IDS_E_METHBEG: case IDS_E_DASMNATIVE: case IDS_E_METHODRT: case IDS_E_CODESIZE: case IDS_W_CREATEDMRES: case IDS_E_READINGMRES: wcscpy_s(buffer,ARRAY_SIZE(buffer),W("%s// ")); buff +=5; cchBuff -= 5; break; case IDS_E_NORVA: wcscpy_s(buffer,ARRAY_SIZE(buffer),W("/* ")); buff += 3; cchBuff -= 3; break; default: break; } #ifdef TARGET_UNIX LoadNativeStringResource(NATIVE_STRING_RESOURCE_TABLE(NATIVE_STRING_RESOURCE_NAME),id, buff, cchBuff, NULL); #else _ASSERTE(g_hResources != NULL); WszLoadString(g_hResources,id,buff,cchBuff); #endif if(id == IDS_E_NORVA) wcscat_s(buff,cchBuff,W(" */")); return buffer; } char* RstrA(unsigned n, unsigned codepage) { static char buff[2048]; WCHAR* wz = RstrW(n); // Unicode -> UTF-8 memset(buff,0,sizeof(buff)); if(!WszWideCharToMultiByte(codepage,0,(LPCWSTR)wz,-1,buff,sizeof(buff),NULL,NULL)) buff[0] = 0; return buff; } char* RstrUTF(unsigned n) { return RstrA(n,CP_UTF8); } char* RstrANSI(unsigned n) { return RstrA(n,g_uConsoleCP); } #if 0 void PrintEncodingSystem() { long i; printf("Custom opcode encoding system employed\n"); printf("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n"); for (i = 0; i < 256; i++) { long value = 
g_pInstructionDecodingTable->m_SingleByteOpcodes[i]; printf("0x%02x --> ", i); printf("%s\n", OpcodeInfo[value].pszName); } } #endif // buffers for formatType functions extern CQuickBytes * g_szBuf_KEYWORD; extern CQuickBytes * g_szBuf_COMMENT; extern CQuickBytes * g_szBuf_ERRORMSG; extern CQuickBytes * g_szBuf_ANCHORPT; extern CQuickBytes * g_szBuf_JUMPPT; extern CQuickBytes * g_szBuf_UnquotedProperName; extern CQuickBytes * g_szBuf_ProperName; BOOL Init() { g_szBuf_KEYWORD = new CQuickBytes(); g_szBuf_COMMENT = new CQuickBytes(); g_szBuf_ERRORMSG = new CQuickBytes(); g_szBuf_ANCHORPT = new CQuickBytes(); g_szBuf_JUMPPT = new CQuickBytes(); g_szBuf_UnquotedProperName = new CQuickBytes(); g_szBuf_ProperName = new CQuickBytes(); return TRUE; } // Init extern LPCSTR *rAsmRefName; // decl. in formatType.cpp -- for AsmRef aliases extern ULONG ulNumAsmRefs; // decl. in formatType.cpp -- for AsmRef aliases void Cleanup() { if (g_pAssemblyImport != NULL) { g_pAssemblyImport->Release(); g_pAssemblyImport = NULL; } if (g_pPubImport != NULL) { g_pPubImport->Release(); g_pPubImport = NULL; } if (g_pImport != NULL) { g_pImport->Release(); g_pImport = NULL; TokenSigDelete(); } if (g_pDisp != NULL) { g_pDisp->Release(); g_pDisp = NULL; } if (g_pSymReader != NULL) { g_pSymReader->Release(); g_pSymReader = NULL; } if (g_pPELoader != NULL) { g_pPELoader->close(); SDELETE(g_pPELoader); } g_iPtrCount = 0; g_NumClasses = 0; g_NumTypeRefs = 0; g_NumModules = 0; g_tkEntryPoint = 0; g_szAsmCodeIndent[0] = 0; g_szNamespace[0]=0; g_pszClassToDump[0]=0; g_pszMethodToDump[0]=0; g_pszSigToDump[0] = 0; g_NumDups = 0; g_NumRefs = 0; g_NumMI = 0; g_LocalComTypeNum = 0; g_nEATableRef = 0; g_fCustomInstructionEncodingSystem = FALSE; if (rAsmRefName != NULL) { for (int i = 0; (unsigned)i < ulNumAsmRefs; i++) { if (rAsmRefName[i] != NULL) VDELETE(rAsmRefName[i]); } VDELETE(rAsmRefName); ulNumAsmRefs = 0; } if (g_rchCA != NULL) VDELETE(g_rchCA); if (g_cl_list != NULL) VDELETE(g_cl_list); if (g_cl_enclosing != NULL) VDELETE(g_cl_enclosing); if (g_pmi_list != NULL) SDELETE(g_pmi_list); if (g_dups != NULL) SDELETE(g_dups); if (g_enum_td_type != NULL) VDELETE(g_enum_td_type); if (g_enum_tr_type != NULL) VDELETE(g_enum_tr_type); if (g_asmref_import != NULL) { for (DWORD i = 0; i < g_NumAsmRefs; i++) { if (g_asmref_import[i] != NULL) g_asmref_import[i]->Release(); } VDELETE(g_asmref_import); g_NumAsmRefs = 0; } } // Cleanup void Uninit() { if (g_pPtrTags != NULL) { SDELETE(g_pPtrTags); } if (g_pPtrSize != NULL) { SDELETE(g_pPtrSize); } if (g_pmi_list != NULL) { SDELETE(g_pmi_list); } if (g_dups != NULL) SDELETE(g_dups); if (g_refs != NULL) SDELETE(g_refs); if (g_pLocalComType != NULL) { SDELETE(g_pLocalComType); } if (g_prVTableRef != NULL) { SDELETE(g_prVTableRef); } if (g_prEATableRef != NULL) { SDELETE(g_prEATableRef); } if (g_szBuf_KEYWORD != NULL) { SDELETE(g_szBuf_KEYWORD); } if (g_szBuf_COMMENT != NULL) { SDELETE(g_szBuf_COMMENT); } if (g_szBuf_ERRORMSG != NULL) { SDELETE(g_szBuf_ERRORMSG); } if (g_szBuf_ANCHORPT != NULL) { SDELETE(g_szBuf_ANCHORPT); } if (g_szBuf_JUMPPT != NULL) { SDELETE(g_szBuf_JUMPPT); } if (g_szBuf_UnquotedProperName != NULL) { SDELETE(g_szBuf_UnquotedProperName); } if (g_szBuf_ProperName != NULL) { SDELETE(g_szBuf_ProperName); } } // Uninit HRESULT IsClassRefInScope(mdTypeRef classref) { HRESULT hr = S_OK; const char *pszNameSpace; const char *pszClassName; mdTypeDef classdef; mdToken tkRes; IfFailRet(g_pImport->GetNameOfTypeRef(classref, &pszNameSpace, &pszClassName)); 
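    // Give the TypeRef a synthetic name if it has none, then probe the current scope
    // for a TypeDef with the same namespace/name (and matching encloser, if nested).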
MAKE_NAME_IF_NONE(pszClassName,classref); IfFailRet(g_pImport->GetResolutionScopeOfTypeRef(classref, &tkRes)); hr = g_pImport->FindTypeDef(pszNameSpace, pszClassName, (TypeFromToken(tkRes) == mdtTypeRef) ? tkRes : mdTokenNil, &classdef); return hr; } #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable:21000) // Suppress PREFast warning about overly large function #endif BOOL EnumClasses() { HRESULT hr; HENUMInternal hEnum; ULONG i = 0,j; //char szString[1024]; HENUMInternal hBody; HENUMInternal hDecl; if(g_cl_list) VDELETE(g_cl_list); if(g_cl_enclosing) VDELETE(g_cl_enclosing); if (g_pmi_list) SDELETE(g_pmi_list); if (g_dups) SDELETE(g_dups); if (g_enum_td_type) VDELETE(g_enum_td_type); if (g_enum_tr_type) VDELETE(g_enum_tr_type); if (g_asmref_import) { for (DWORD nIndex = 0; nIndex < g_NumAsmRefs; nIndex++) { if (g_asmref_import[nIndex] != NULL) g_asmref_import[nIndex]->Release(); } VDELETE(g_asmref_import); g_NumAsmRefs = 0; } //-------------------------------------------------------------- if (FAILED(g_pImport->EnumAllInit(mdtTypeRef,&hEnum))) { printError(g_pFile, "MetaData error: cannot enumerate all TypeRefs"); return FALSE; } g_NumTypeRefs = g_pImport->EnumGetCount(&hEnum); g_pImport->EnumClose(&hEnum); if(g_NumTypeRefs) { g_enum_tr_type = new BYTE[g_NumTypeRefs+1]; if(g_enum_tr_type == NULL) return FALSE; memset(g_enum_tr_type,0xFF,g_NumTypeRefs+1); } //-------------------------------------------------------------- if (FAILED(g_pImport->EnumAllInit(mdtAssemblyRef, &hEnum))) { printError(g_pFile, "MetaData error: cannot enumerate all AssemblyRefs"); return FALSE; } g_NumAsmRefs = g_pImport->EnumGetCount(&hEnum); g_pImport->EnumClose(&hEnum); if(g_NumAsmRefs) { g_asmref_import = new IMDInternalImport*[g_NumAsmRefs+1]; if(g_asmref_import == NULL) return FALSE; memset(g_asmref_import,0,(g_NumAsmRefs+1)*sizeof(IMDInternalImport*)); } //-------------------------------------------------------------- hr = g_pImport->EnumTypeDefInit( &hEnum); if (FAILED(hr)) { printError(g_pFile,RstrUTF(IDS_E_CLSENUM)); return FALSE; } g_NumClasses = g_pImport->EnumGetCount(&hEnum); g_tkClassToDump = 0; g_NumMI = 0; g_NumDups = 0; if(g_NumClasses == 0) return TRUE; g_enum_td_type = new BYTE[g_NumClasses+1]; if(g_enum_td_type == NULL) return FALSE; memset(g_enum_td_type,0xFF,g_NumClasses+1); g_cl_list = new mdToken[g_NumClasses]; if(g_cl_list == NULL) return FALSE; g_cl_enclosing = new mdToken[g_NumClasses]; if(g_cl_enclosing == NULL) { VDELETE(g_cl_list); return FALSE; } g_pmi_list = new DynamicArray<MIDescriptor>; if(g_pmi_list == NULL) { VDELETE(g_cl_enclosing); VDELETE(g_cl_list); return FALSE; } g_dups = new DynamicArray<mdToken>; if(g_dups == NULL) { SDELETE(g_pmi_list); VDELETE(g_cl_enclosing); VDELETE(g_cl_list); return FALSE; } // fill the list of typedef tokens while(g_pImport->EnumNext(&hEnum, &g_cl_list[i])) { mdToken tkEnclosing; if (g_Mode == MODE_DUMP_CLASS || g_Mode == MODE_DUMP_CLASS_METHOD || g_Mode == MODE_DUMP_CLASS_METHOD_SIG) { CQuickBytes out; // we want plain class name without token values BOOL fDumpTokens = g_fDumpTokens; g_fDumpTokens = FALSE; PAL_CPP_TRY { if (strcmp(PrettyPrintClass(&out, g_cl_list[i], g_pImport), g_pszClassToDump) == 0) { g_tkClassToDump = g_cl_list[i]; } } PAL_CPP_CATCH_ALL { } PAL_CPP_ENDTRY; g_fDumpTokens = fDumpTokens; } g_cl_enclosing[i] = mdTypeDefNil; hr = g_pImport->GetNestedClassProps(g_cl_list[i],&tkEnclosing); if (SUCCEEDED(hr) && RidFromToken(tkEnclosing)) // No need to check token validity here, it's done later g_cl_enclosing[i] = 
tkEnclosing; if (SUCCEEDED(g_pImport->EnumMethodImplInit(g_cl_list[i],&hBody,&hDecl))) { if ((j = g_pImport->EnumMethodImplGetCount(&hBody,&hDecl))) { mdToken tkBody,tkDecl,tkBodyParent; for (ULONG k = 0; k < j; k++) { if (g_pImport->EnumMethodImplNext(&hBody,&hDecl,&tkBody,&tkDecl) == S_OK) { if (SUCCEEDED(g_pImport->GetParentToken(tkBody,&tkBodyParent))) { (*g_pmi_list)[g_NumMI].tkClass = g_cl_list[i]; (*g_pmi_list)[g_NumMI].tkBody = tkBody; (*g_pmi_list)[g_NumMI].tkDecl = tkDecl; (*g_pmi_list)[g_NumMI].tkBodyParent = tkBodyParent; g_NumMI++; } } } } g_pImport->EnumMethodImplClose(&hBody,&hDecl); } i++; } g_pImport->EnumClose(&hEnum); // check nesting consistency (circular nesting, invalid enclosers) for(i = 0; i < g_NumClasses; i++) { mdToken tkThis = g_cl_list[i]; mdToken tkEncloser = g_cl_enclosing[i]; mdToken tkPrevLevel = tkThis; while(tkEncloser != mdTypeDefNil) { if(tkThis == tkEncloser) { sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_SELFNSTD),tkThis); printError(g_pFile,szString); g_cl_enclosing[i] = mdTypeDefNil; break; } else { for(j = 0; (j < g_NumClasses)&&(tkEncloser != g_cl_list[j]); j++); if(j == g_NumClasses) { sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_NOENCLOS), tkPrevLevel,tkEncloser); printError(g_pFile,szString); g_cl_enclosing[i] = mdTypeDefNil; break; } else { tkPrevLevel = tkEncloser; tkEncloser = g_cl_enclosing[j]; } } } // end while(tkEncloser != mdTypeDefNil) } // end for(i = 0; i < g_NumClasses; i++) // register all class dups const char *pszClassName; const char *pszNamespace; const char *pszClassName1; const char *pszNamespace1; if (FAILED(g_pImport->GetNameOfTypeDef( g_cl_list[0], &pszClassName, &pszNamespace))) { char sz[2048]; sprintf_s(sz, 2048, RstrUTF(IDS_E_INVALIDRECORD), g_cl_list[0]); printLine(g_pFile, sz); return FALSE; } if((g_cl_enclosing[0]==mdTypeDefNil) &&(0==strcmp(pszClassName,"<Module>")) &&(*pszNamespace == 0)) { (*g_dups)[g_NumDups++] = g_cl_list[0]; } for(i = 1; i < g_NumClasses; i++) { if (FAILED(g_pImport->GetNameOfTypeDef( g_cl_list[i], &pszClassName, &pszNamespace))) { char sz[2048]; sprintf_s(sz, 2048, RstrUTF(IDS_E_INVALIDRECORD), g_cl_list[i]); printLine(g_pFile, sz); return FALSE; } for(j = 0; j < i; j++) { if (FAILED(g_pImport->GetNameOfTypeDef( g_cl_list[j], &pszClassName1, &pszNamespace1))) { char sz[2048]; sprintf_s(sz, 2048, RstrUTF(IDS_E_INVALIDRECORD), g_cl_list[j]); printLine(g_pFile, sz); return FALSE; } if((g_cl_enclosing[i]==g_cl_enclosing[j]) &&(0==strcmp(pszClassName,pszClassName1)) &&(0==strcmp(pszNamespace,pszNamespace1))) { (*g_dups)[g_NumDups++] = g_cl_list[i]; break; } } } // end for(i = 1; i < g_NumClasses; i++) //register all field and method dups for(i = 0; i <= g_NumClasses; i++) { HENUMInternal hEnumMember; mdToken *pMemberList = NULL; DWORD NumMembers,k; // methods if (i != 0) { hr = g_pImport->EnumInit(mdtMethodDef, g_cl_list[i-1], &hEnumMember); } else { hr = g_pImport->EnumGlobalFunctionsInit(&hEnumMember); } if (FAILED(hr)) { printLine(g_pFile,RstrUTF(IDS_E_MEMBRENUM)); return FALSE; } NumMembers = g_pImport->EnumGetCount(&hEnumMember); pMemberList = new mdToken[NumMembers]; for (j = 0; g_pImport->EnumNext(&hEnumMember, &pMemberList[j]); j++); _ASSERTE(j == NumMembers); g_pImport->EnumClose(&hEnumMember); for (j = 1; j < NumMembers; j++) { const char *pszName; ULONG cSig; PCCOR_SIGNATURE pSig; if (FAILED(g_pImport->GetNameOfMethodDef(pMemberList[j], &pszName)) || FAILED(g_pImport->GetSigOfMethodDef(pMemberList[j], &cSig, &pSig))) { char sz[2048]; sprintf_s(sz, 2048, RstrUTF(IDS_E_INVALIDRECORD), 
pMemberList[j]); printLine(g_pFile, sz); return FALSE; } for (k = 0; k < j; k++) { const char *szName1; if (FAILED(g_pImport->GetNameOfMethodDef(pMemberList[k], &szName1))) { char sz[2048]; sprintf_s(sz, 2048, RstrUTF(IDS_E_INVALIDRECORD), pMemberList[k]); printLine(g_pFile, sz); return FALSE; } if (strcmp(pszName, szName1) == 0) { ULONG cSig1; PCCOR_SIGNATURE pSig1; if (FAILED(g_pImport->GetSigOfMethodDef(pMemberList[k], &cSig1, &pSig1))) { char sz[2048]; sprintf_s(sz, 2048, RstrUTF(IDS_E_INVALIDRECORD), pMemberList[k]); printLine(g_pFile, sz); return FALSE; } if((cSig == cSig1)&&(0==memcmp(pSig,pSig1,cSig))) { (*g_dups)[g_NumDups++] = pMemberList[j]; break; } } } } VDELETE(pMemberList); // fields if (i != 0) { hr = g_pImport->EnumInit(mdtFieldDef, g_cl_list[i-1], &hEnumMember); } else { hr = g_pImport->EnumGlobalFieldsInit(&hEnumMember); } if (FAILED(hr)) { printLine(g_pFile,RstrUTF(IDS_E_MEMBRENUM)); return FALSE; } NumMembers = g_pImport->EnumGetCount(&hEnumMember); pMemberList = new mdToken[NumMembers]; for (j = 0; g_pImport->EnumNext(&hEnumMember, &pMemberList[j]); j++); _ASSERTE(j == NumMembers); g_pImport->EnumClose(&hEnumMember); for (j = 1; j < NumMembers; j++) { const char *pszName; ULONG cSig; PCCOR_SIGNATURE pSig; if (FAILED(g_pImport->GetNameOfFieldDef(pMemberList[j], &pszName)) || FAILED(g_pImport->GetSigOfFieldDef(pMemberList[j], &cSig, &pSig))) { char sz[2048]; sprintf_s(sz, 2048, RstrUTF(IDS_E_INVALIDRECORD), pMemberList[j]); printLine(g_pFile, sz); return FALSE; } for (k = 0; k < j; k++) { const char *szName1; if (FAILED(g_pImport->GetNameOfFieldDef(pMemberList[k], &szName1))) { char sz[2048]; sprintf_s(sz, 2048, RstrUTF(IDS_E_INVALIDRECORD), pMemberList[k]); printLine(g_pFile, sz); return FALSE; } if (strcmp(pszName, szName1) == 0) { ULONG cSig1; PCCOR_SIGNATURE pSig1; if (FAILED(g_pImport->GetSigOfFieldDef(pMemberList[k], &cSig1, &pSig1))) { char sz[2048]; sprintf_s(sz, 2048, RstrUTF(IDS_E_INVALIDRECORD), pMemberList[k]); printLine(g_pFile, sz); return FALSE; } if((cSig == cSig1)&&(0==memcmp(pSig,pSig1,cSig))) { (*g_dups)[g_NumDups++] = pMemberList[j]; break; } } } } VDELETE(pMemberList); } // end for(i = 0; i <= g_NumClasses; i++) return TRUE; } #ifdef _PREFAST_ #pragma warning(pop) #endif void DumpMscorlib(void* GUICookie) { // In the CoreCLR with reference assemblies and redirection it is more difficult to determine if // a particular Assembly is the System assembly, like mscorlib.dll is for the Desktop CLR. // In the CoreCLR runtimes, the System assembly can be System.Private.CoreLib.dll, System.Runtime.dll // or netstandard.dll and in the future a different Assembly name could be used. // We now determine the identity of the System assembly by querying if the Assembly defines the // well known type System.Object as that type must be defined by the System assembly // If this type is defined then we will output the ".mscorlib" directive to indicate that this // assembly is the System assembly. 
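// When the probe below succeeds, the disassembly gains a single directive line of the form:
//     .mscorlib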
// mdTypeDef tkObjectTypeDef = mdTypeDefNil; // Lookup the type System.Object and see it it has a type definition in this assembly if (SUCCEEDED(g_pPubImport->FindTypeDefByName(W("System.Object"), mdTypeDefNil, &tkObjectTypeDef))) { if (tkObjectTypeDef != mdTypeDefNil) { // We do have a type definition for System.Object in this assembly // DWORD dwClassAttrs = 0; mdToken tkExtends = mdTypeDefNil; // Retrieve the type def properties as well, so that we can check a few more things about // the System.Object type // if (SUCCEEDED(g_pPubImport->GetTypeDefProps(tkObjectTypeDef, NULL, NULL, 0, &dwClassAttrs, &tkExtends))) { bool bExtends = g_pPubImport->IsValidToken(tkExtends); bool isClass = ((dwClassAttrs & tdClassSemanticsMask) == tdClass); // We also check the type properties to make sure that we have a class and not a Value type definition // and that this type definition isn't extending another type. // if (isClass & !bExtends) { // We will mark this assembly with the System assembly directive: .mscorlib // printLine(GUICookie, ""); sprintf_s(szString, SZSTRING_SIZE, "%s%s ", g_szAsmCodeIndent, KEYWORD(".mscorlib")); printLine(GUICookie, szString); printLine(GUICookie, ""); } } } } } void DumpTypelist(void* GUICookie) { if(g_NumClasses > 1) { DWORD i; CQuickBytes out; printLine(GUICookie,""); sprintf_s(szString,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,KEYWORD(".typelist")); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"%s%s",g_szAsmCodeIndent,SCOPE()); printLine(GUICookie,szString); strcat_s(g_szAsmCodeIndent,MAX_MEMBER_LENGTH," "); for(i = 0; i < g_NumClasses; i++) { out.Shrink(0); sprintf_s(szString,SZSTRING_SIZE, "%s%s",g_szAsmCodeIndent, PrettyPrintClass(&out, g_cl_list[i], g_pImport)); printLine(GUICookie,szString); } g_szAsmCodeIndent[strlen(g_szAsmCodeIndent)-2] = 0; sprintf_s(szString,SZSTRING_SIZE,"%s%s",g_szAsmCodeIndent,UNSCOPE()); printLine(GUICookie,szString); printLine(GUICookie,""); } } #define ELEMENT_TYPE_TYPEDEF (ELEMENT_TYPE_MAX+1) BOOL EnumTypedefs() { HENUMInternal hEnum; ULONG i,l; mdToken tk; if (g_typedefs) SDELETE(g_typedefs); g_typedefs = new DynamicArray<TypeDefDescr>; g_NumTypedefs = 0; if (FAILED(g_pImport->EnumAllInit(mdtTypeSpec, &hEnum))) { return FALSE; } for (i = 0; g_pImport->EnumNext(&hEnum, &tk); i++) { ULONG cSig; PCCOR_SIGNATURE sig; if (FAILED(g_pImport->GetSigFromToken(tk, &cSig, &sig))) { return FALSE; } if (*sig == ELEMENT_TYPE_TYPEDEF) { TypeDefDescr* pTDD = &((*g_typedefs)[g_NumTypedefs]); pTDD->szName = (char*)sig+1; l = 2+(ULONG)strlen((char*)sig+1); pTDD->tkTypeSpec = GET_UNALIGNED_VAL32(sig + l); pTDD->tkSelf = tk; if (TypeFromToken(pTDD->tkTypeSpec) == mdtTypeSpec) { if (FAILED(g_pImport->GetSigFromToken(pTDD->tkTypeSpec,&(pTDD->cb), &(pTDD->psig)))) { return FALSE; } } else if (TypeFromToken(pTDD->tkTypeSpec) == mdtCustomAttribute) { l += sizeof(mdToken); pTDD->psig = sig + l; pTDD->cb = cSig - l; } else { pTDD->psig = NULL; pTDD->cb = 0; } g_NumTypedefs++; } } g_pImport->EnumClose(&hEnum); return TRUE; } void DumpTypedefs(void* GUICookie) { DWORD i; char* szptr; CQuickBytes out; printLine(GUICookie,""); for(i = 0; i < g_NumTypedefs; i++) { TypeDefDescr* pTDD = &((*g_typedefs)[i]); szptr = &szString[0]; szString[0] = 0; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,ANCHORPT(KEYWORD(".typedef"),pTDD->tkSelf)); if(g_fDumpTokens) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("/*%08X*/ "),pTDD->tkSelf); { ULONG n = g_NumTypedefs; DWORD tk = pTDD->tkTypeSpec; switch (TypeFromToken(tk)) { 
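            // A .typedef alias can name a type, a field, a method, a member reference,
            // or an entire custom attribute blob; dispatch on the kind of the aliased token.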
default: break; case mdtCustomAttribute: printLine(GUICookie,szString); strcat_s(g_szAsmCodeIndent,MAX_MEMBER_LENGTH," "); { mdToken tkType; mdToken tkOwner; BYTE* pBlob=NULL; ULONG uLen=0; tkType = GET_UNALIGNED_VAL32(pTDD->psig); tkOwner = GET_UNALIGNED_VAL32(pTDD->psig + sizeof(mdToken)); if(pTDD->cb > 2*sizeof(mdToken)) { pBlob = (BYTE*)pTDD->psig + 2*sizeof(mdToken); uLen = pTDD->cb - 2*sizeof(mdToken); } DumpCustomAttributeProps(0,tkType,tkOwner,pBlob,uLen,GUICookie, (RidFromToken(tkOwner)!=0)); } sprintf_s(szString,SZSTRING_SIZE,"%s %s %s", g_szAsmCodeIndent,KEYWORD("as"), ProperName((*g_typedefs)[i].szName)); printLine(GUICookie,szString); g_szAsmCodeIndent[strlen(g_szAsmCodeIndent)-8]=0; continue; case mdtMethodDef: szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("method ")); break; case mdtFieldDef: szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("field ")); break; case mdtMemberRef: { PCCOR_SIGNATURE typePtr; const char *pszMemberName; ULONG cComSig; if (FAILED(g_pImport->GetNameAndSigOfMemberRef( tk, &typePtr, &cComSig, &pszMemberName))) { szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"ERROR "); break; } unsigned callConv = CorSigUncompressData(typePtr); if (isCallConv(callConv, IMAGE_CEE_CS_CALLCONV_FIELD)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("field ")); else szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("method ")); break; } } g_NumTypedefs = 0; PrettyPrintToken(szString, tk, g_pImport,g_pFile,0); g_NumTypedefs = n; szptr = &szString[strlen(szString)]; } szptr+= sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," %s %s", KEYWORD("as"), ProperName((*g_typedefs)[i].szName)); printLine(GUICookie,szString); } } BOOL PrintClassList() { DWORD i; BOOL fSuccess = FALSE; //char szString[1024]; char* szptr; if(g_NumClasses) { printLine(g_pFile,COMMENT("// Classes defined in this module:")); printLine(g_pFile,COMMENT("//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")); for (i = 0; i < g_NumClasses; i++) { const char *pszClassName; const char *pszNamespace; DWORD dwClassAttrs; mdTypeRef crExtends; if (FAILED(g_pImport->GetNameOfTypeDef( g_cl_list[i], &pszClassName, &pszNamespace))) { printLine(g_pFile, COMMENT("// Invalid TypeDef record")); return FALSE; } MAKE_NAME_IF_NONE(pszClassName,g_cl_list[i]); // if this is the "<Module>" class (there is a misnomer) then skip it! 
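            // Decode the TypeDef flags into the readable annotations appended after the class name below.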
if (FAILED(g_pImport->GetTypeDefProps( g_cl_list[i], &dwClassAttrs, &crExtends))) { printLine(g_pFile, COMMENT("// Invalid TypeDef record")); return FALSE; } szptr = &szString[0]; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"// "); if (IsTdInterface(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"Interface "); //else if (IsTdValueType(dwClassAttrs)) szptr+=sprintf(szptr,"Value Class"); //else if (IsTdUnmanagedValueType(dwClassAttrs)) szptr+=sprintf(szptr,"NotInGCHeap Value Class"); else szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"Class "); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%-30s ", pszClassName); if (IsTdPublic(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(public) "); if (IsTdAbstract(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(abstract) "); if (IsTdAutoLayout(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(auto) "); if (IsTdSequentialLayout(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(sequential) "); if (IsTdExplicitLayout(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(explicit) "); if (IsTdAnsiClass(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(ansi) "); if (IsTdUnicodeClass(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(unicode) "); if (IsTdAutoClass(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(autochar) "); if (IsTdImport(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(import) "); if (IsTdWindowsRuntime(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(windowsruntime) "); //if (IsTdEnum(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(enum) "); if (IsTdSealed(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(sealed) "); if (IsTdNestedPublic(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(nested public) "); if (IsTdNestedPrivate(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(nested private) "); if (IsTdNestedFamily(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(nested family) "); if (IsTdNestedAssembly(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(nested assembly) "); if (IsTdNestedFamANDAssem(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(nested famANDassem) "); if (IsTdNestedFamORAssem(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(nested famORassem) "); printLine(g_pFile,COMMENT(szString)); } printLine(g_pFile,COMMENT("//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")); printLine(g_pFile,""); } else printLine(g_pFile,COMMENT("// No classes defined in this module")); fSuccess = TRUE; return fSuccess; } BOOL ValidateToken(mdToken tk, ULONG type = (ULONG) ~0) { BOOL bRtn; //char szString[1024]; bRtn = g_pImport->IsValidToken(tk); if (!bRtn) { sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_INVALIDTK), tk); printError(g_pFile,szString); } else if (type != (ULONG) ~0 && TypeFromToken(tk) != type) { sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_UNEXPTYPE), TypeFromToken(type), TypeFromToken(tk)); printError(g_pFile,szString); bRtn = FALSE; } return bRtn; } BOOL DumpModule(mdModuleRef mdMod) { const char *pszModName; //char szString[1024]; if (FAILED(g_pImport->GetModuleRefProps(mdMod,&pszModName))) { pszModName = "Invalid ModuleRef record"; } MAKE_NAME_IF_NONE(pszModName,mdMod); 
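    // Emit the module reference as a .import directive; only the module name is dumped here.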
sprintf_s(szString,SZSTRING_SIZE,"%s%s \"%s\"",g_szAsmCodeIndent,KEYWORD(".import"),pszModName); // what about GUID and MVID? printLine(g_pFile,szString); return TRUE; } char* DumpPinvokeMap(DWORD dwMappingFlags, const char *szImportName, mdModuleRef mrImportDLL, __inout __nullterminated char* szString, void* GUICookie) { const char *szImportDLLName; char* szptr = &szString[strlen(szString)]; if (FAILED(g_pImport->GetModuleRefProps(mrImportDLL,&szImportDLLName))) { szImportDLLName = "Invalid ModuleRef record"; } if(strlen(szImportDLLName) != 0) { szptr = DumpQString(GUICookie, (char*)szImportDLLName, g_szAsmCodeIndent, 80); } //if(strlen(szImportDLLName)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"\"%s\"",szImportDLLName); //if(szImportName && strlen(szImportName)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," as \"%s\"",szImportName); if(szImportName && strlen(szImportName)) { szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD(" as ")); szptr = DumpQString(GUICookie, (char*)szImportName, g_szAsmCodeIndent, 80); } szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD((char*)0)); if(IsPmNoMangle(dwMappingFlags)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," nomangle"); if(IsPmCharSetAnsi(dwMappingFlags)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," ansi"); if(IsPmCharSetUnicode(dwMappingFlags)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," unicode"); if(IsPmCharSetAuto(dwMappingFlags)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," autochar"); if(IsPmSupportsLastError(dwMappingFlags)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," lasterr"); if(IsPmCallConvWinapi(dwMappingFlags)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," winapi"); if(IsPmCallConvCdecl(dwMappingFlags)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," cdecl"); if(IsPmCallConvThiscall(dwMappingFlags)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," thiscall"); if(IsPmCallConvFastcall(dwMappingFlags)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," fastcall"); if(IsPmCallConvStdcall(dwMappingFlags)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," stdcall"); if(IsPmBestFitEnabled(dwMappingFlags)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," bestfit:on"); if(IsPmBestFitDisabled(dwMappingFlags)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," bestfit:off"); if(IsPmThrowOnUnmappableCharEnabled(dwMappingFlags)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," charmaperror:on"); if(IsPmThrowOnUnmappableCharDisabled(dwMappingFlags)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," charmaperror:off"); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD((char*)-1)); return szptr; } void DumpByteArray(__inout __nullterminated char* szString, const BYTE* pBlob, ULONG ulLen, void* GUICookie) { ULONG32 ulStrOffset = 0; ULONG32 j = 0; ULONG32 k = 0; ULONG32 m = 0; char sz[256]; bool printsz = FALSE; char* szptr = NULL; BYTE byt = 0; ulStrOffset = (ULONG32) strlen(szString); szptr = &szString[ulStrOffset]; if(!pBlob) ulLen = 0; for(j = 0, k=0, m=0; j < ulLen; j++,k++,m++) { if(k == 16) { if(printsz) { szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT(" // %s"),sz); } printLine(GUICookie,szString); strcpy_s(szString,SZSTRING_SIZE,g_szAsmCodeIndent); for(k=(ULONG32) strlen(szString); k < ulStrOffset; k++) szString[k] = ' '; szString[k] = 0; szptr = &szString[ulStrOffset]; k = 0; m = 0; printsz = FALSE; } bool bBreak = FALSE; PAL_CPP_TRY { byt = pBlob[j]; } 
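        // Reading past a bad blob pointer is caught below, reported as INVALID DATA ADDRESS,
        // and the byte dump of this blob stops instead of crashing the disassembler.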
PAL_CPP_CATCH_ALL { strcat_s(szString, SZSTRING_SIZE,ERRORMSG("INVALID DATA ADDRESS")); bBreak = TRUE; } PAL_CPP_ENDTRY; if (bBreak) break; szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%2.2X ",byt); if(isprint(byt)) { if(g_fDumpRTF) { if((byt == '\\')||(byt=='{')||(byt=='}')) sz[m++]='\\'; sz[m] = byt; } else if(g_fDumpHTML) { if(byt == '<') { sz[m] = 0; strcat_s(sz,256-m,LTN()); m+=(ULONG32)(strlen(LTN())); } else if(byt == '>') { sz[m] = 0; strcat_s(sz,256-m,GTN()); m+=(ULONG32)(strlen(GTN())); } else sz[m] = byt; } else sz[m] = byt; printsz = TRUE; } else sz[m] = '.'; sz[m+1] = 0; } szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),") "); if(printsz) { for(j = k; j < 16; j++) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," "); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("// %s"),sz); } } mdToken ResolveTypeDefReflectionNotation(IMDInternalImport *pIMDI, LPCUTF8 szNamespace, __inout LPUTF8 szName, mdToken tkEncloser) { mdToken tk = 0; LPUTF8 pch = strrchr(szName, '+'); if(pch != NULL) { *pch = 0; tkEncloser = ResolveTypeDefReflectionNotation(pIMDI,szNamespace,szName,tkEncloser); szNamespace = ""; szName = pch+1; } if(SUCCEEDED(pIMDI->FindTypeDef(szNamespace,szName,tkEncloser,&tk))) return tk; else return 0; } mdToken ResolveTypeRefReflectionNotation(IMDInternalImport *pIMDI, _In_ __nullterminated const char* szNamespace, __inout __nullterminated char* szName, mdToken tkResScope) { mdToken tk = 0; char* pch = strrchr(szName, '+'); if(pch != NULL) { *pch = 0; tkResScope = ResolveTypeRefReflectionNotation(pIMDI,szNamespace,szName,tkResScope); szNamespace = ""; szName = pch+1; } if(SUCCEEDED(pIMDI->FindTypeRefByName((LPCSTR)szNamespace,(LPCSTR)szName,tkResScope,&tk))) return tk; else return 0; } mdToken ResolveReflectionNotation(BYTE* dataPtr, unsigned Lstr, IMDInternalImport *pIMDI, void* GUICookie) { char* str = new char[Lstr+1]; mdToken ret = 0; if(str) { char szNamespaceDefault[] = ""; char* szNamespace = szNamespaceDefault; char* szName = str; char* szAssembly = NULL; char szAssemblyMscorlib[] = "mscorlib"; char* pch; memcpy(str,dataPtr,Lstr); str[Lstr] = 0; //format: Namespace.Name, Assembly,... 
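        // Split at the first ',' to peel off the assembly part, then at the last '.' to separate
        // namespace from type name; e.g. "My.Ns.MyType, MyAsm" -> namespace "My.Ns", name "MyType",
        // assembly "MyAsm" (the example names are illustrative only).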
pch = strchr(str,','); if(pch) { *pch = 0; for(szAssembly = pch+1; *szAssembly == ' '; szAssembly++); pch = strchr(szAssembly,','); if(pch) *pch = 0; } pch = strrchr(str,'.'); if(pch) { *pch = 0; szNamespace = str; szName = pch+1; } if(szAssembly == NULL) { // Look in TypeDefs mdToken tk = ResolveTypeDefReflectionNotation(pIMDI,szNamespace,szName,mdTypeDefNil); if(tk != 0) ret = tk; else // TypeDef not found, try TypeRef from mscorlib szAssembly = szAssemblyMscorlib; } if(szAssembly != NULL) { // Look in TypeRefs // First, identify resolution scope _ASSERTE(*szName); ULONG mAsmRefs = pIMDI->GetCountWithTokenKind(mdtAssemblyRef); if(mAsmRefs) { mdToken tkResScope = 0; mdToken tk=TokenFromRid(mdtAssemblyRef,1), tkmax=TokenFromRid(mdtAssemblyRef,mAsmRefs); LPCSTR szAsmRefName; // these are dummies const void* pPKT, *pHash; ULONG ulPKT,ulHash; AssemblyMetaDataInternal MD; DWORD dwFlags; for (;tk <= tkmax; tk++) { if (FAILED(pIMDI->GetAssemblyRefProps(tk,&pPKT,&ulPKT,&szAsmRefName,&MD,&pHash,&ulHash,&dwFlags))) { continue; } if(0==strcmp(szAsmRefName,szAssembly)) { tkResScope = tk; break; } } if(tkResScope) { ret = ResolveTypeRefReflectionNotation(pIMDI,szNamespace,szName,tkResScope); } } } } VDELETE(str); return ret; } unsigned UnderlyingTypeOfEnumTypeDef(mdToken tk, IMDInternalImport *pIMDI) { // make sure it's a TypeDef if(TypeFromToken(tk) != mdtTypeDef) return 0; // make sure it's an enum mdToken tkParent; DWORD dwAttr; if (FAILED(pIMDI->GetTypeDefProps(tk,&dwAttr,&tkParent))) { return 0; } if(RidFromToken(tkParent)==0) return 0; LPCSTR szName, szNamespace; switch(TypeFromToken(tkParent)) { case mdtTypeDef: if (FAILED(pIMDI->GetNameOfTypeDef(tkParent, &szName, &szNamespace))) { return 0; } break; case mdtTypeRef: if (FAILED(pIMDI->GetNameOfTypeRef(tkParent, &szNamespace, &szName))) { return 0; } break; default: return 0; } if (strcmp(szName,"Enum") != 0 || strcmp(szNamespace,"System") != 0) { // the parent type is not System.Enum so this type has no underlying type return 0; } // OK, it's an enum; find its instance field and get its type HENUMInternal hEnum; mdToken tkField; if (FAILED(pIMDI->EnumInit(mdtFieldDef,tk,&hEnum))) { return 0; } while(pIMDI->EnumNext(&hEnum,&tkField)) { if (FAILED(pIMDI->GetFieldDefProps(tkField, &dwAttr))) { continue; } if (IsFdStatic(dwAttr)) { continue; } PCCOR_SIGNATURE psig; if (FAILED(pIMDI->GetSigOfFieldDef(tkField,(ULONG*)&dwAttr, &psig))) { continue; } pIMDI->EnumClose(&hEnum); return (unsigned) *(psig+1); } // no instance field found -- error! 
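    // Returning 0 means the underlying type could not be determined; the CA blob printer
    // then falls back to guessing it from the size of the serialized value.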
pIMDI->EnumClose(&hEnum); return 0; } mdToken TypeRefToTypeDef(mdToken tk, IMDInternalImport *pIMDI, IMDInternalImport **ppIMDInew) { mdToken tkEncloser = mdTypeDefNil; mdToken tkTypeDef = mdTypeDefNil; *ppIMDInew = NULL; // get the resolution scope of TypeRef mdToken tkRS; if (FAILED(pIMDI->GetResolutionScopeOfTypeRef(tk, &tkRS))) { goto AssignAndReturn; } if (TypeFromToken(tkRS) == mdtTypeRef) tkEncloser = TypeRefToTypeDef(tkRS,pIMDI,ppIMDInew); else if (TypeFromToken(tkRS) == mdtAssemblyRef) { *ppIMDInew = g_asmref_import[RidFromToken(tkRS)]; if (*ppIMDInew == NULL) { // get that assembly and open IMDInternalImport IMetaDataAssemblyImport* pAssemblyImport; if (FAILED(g_pPubImport->QueryInterface(IID_IMetaDataAssemblyImport, (void**) &pAssemblyImport))) goto AssignAndReturn; const void *pPKT, *pHash; ULONG cHash,cName; WCHAR wzName[2048]; ASSEMBLYMETADATA md; WCHAR wzLocale[1024]; DWORD dwFlags; IUnknown* pIAMDI[64]; memset(&md,0,sizeof(ASSEMBLYMETADATA)); md.szLocale = wzLocale; md.cbLocale = 1024; struct Param { IMetaDataAssemblyImport* pAssemblyImport; WCHAR *wzName; IUnknown **pIAMDI; ULONG cPKT; } param; param.pAssemblyImport = pAssemblyImport; param.wzName = wzName; param.pIAMDI = pIAMDI; pAssemblyImport->GetAssemblyRefProps(tkRS,&pPKT,&param.cPKT,wzName,2048,&cName,&md,&pHash,&cHash,&dwFlags); PAL_TRY(Param *, pParam, &param) { if(FAILED(pParam->pAssemblyImport->FindAssembliesByName(NULL,NULL,(LPCWSTR)pParam->wzName,pParam->pIAMDI,64,&pParam->cPKT))) pParam->cPKT=0; } PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER) { param.cPKT=0; } PAL_ENDTRY pAssemblyImport->Release(); if(param.cPKT == 0) goto AssignAndReturn; _ASSERTE(pIAMDI[0] != NULL); IUnknown *pUnk; if(FAILED(pIAMDI[0]->QueryInterface(IID_IUnknown, (void**)&pUnk))) goto AssignAndReturn; if (FAILED(GetMetaDataInternalInterfaceFromPublic( pUnk, IID_IMDInternalImport, (LPVOID *)ppIMDInew))) { goto AssignAndReturn; } _ASSERTE(*ppIMDInew != NULL); g_asmref_import[RidFromToken(tkRS)] = *ppIMDInew; pUnk->Release(); for(cHash=0; cHash<param.cPKT; cHash++) if(pIAMDI[cHash]) pIAMDI[cHash]->Release(); } } if (*ppIMDInew != NULL) { LPCSTR szName, szNamespace; if (FAILED(pIMDI->GetNameOfTypeRef(tk, &szNamespace, &szName))) { tkTypeDef = mdTypeDefNil; goto AssignAndReturn; } if (FAILED((*ppIMDInew)->FindTypeDef(szNamespace,szName,tkEncloser,&tkTypeDef))) { tkTypeDef = mdTypeDefNil; } } AssignAndReturn: return tkTypeDef; } unsigned UnderlyingTypeOfEnum(mdToken tk, IMDInternalImport *pIMDI) { unsigned uRet = 0; unsigned ix = RidFromToken(tk); if(TypeFromToken(tk)==mdtTypeDef) { if(g_enum_td_type[ix] == 0xFF) { g_enum_td_type[ix] = (BYTE)UnderlyingTypeOfEnumTypeDef(tk,pIMDI); } return (unsigned)g_enum_td_type[ix]; } else if(TypeFromToken(tk)==mdtTypeRef) { if(g_enum_tr_type[ix] == 0xFF) { IMDInternalImport *pIMDInew = NULL; mdToken tkTypeDef = TypeRefToTypeDef(tk,pIMDI, &pIMDInew); if((RidFromToken(tkTypeDef)!=0)&&(pIMDInew != NULL)) { uRet = UnderlyingTypeOfEnumTypeDef(tkTypeDef,pIMDInew); } g_enum_tr_type[ix] = (BYTE)uRet; } return (unsigned)g_enum_tr_type[ix]; } else return 0; } /**************************************************************************/ /* move 'ptr past the exactly one type description */ BYTE* skipType(BYTE* ptr) { mdToken tk; AGAIN: switch(*ptr++) { case ELEMENT_TYPE_VOID : case ELEMENT_TYPE_BOOLEAN : case ELEMENT_TYPE_CHAR : case ELEMENT_TYPE_I1 : case ELEMENT_TYPE_U1 : case ELEMENT_TYPE_I2 : case ELEMENT_TYPE_U2 : case ELEMENT_TYPE_I4 : case ELEMENT_TYPE_U4 : case ELEMENT_TYPE_I8 : case ELEMENT_TYPE_U8 : case ELEMENT_TYPE_R4 
: case ELEMENT_TYPE_R8 : case ELEMENT_TYPE_U : case ELEMENT_TYPE_I : case ELEMENT_TYPE_STRING : case ELEMENT_TYPE_OBJECT : case ELEMENT_TYPE_TYPEDBYREF : case ELEMENT_TYPE_SENTINEL : case SERIALIZATION_TYPE_TYPE : case SERIALIZATION_TYPE_TAGGED_OBJECT : /* do nothing */ break; case SERIALIZATION_TYPE_ENUM : { unsigned Lstr = CorSigUncompressData((PCCOR_SIGNATURE&)ptr); ptr += Lstr; break; } case ELEMENT_TYPE_VALUETYPE : case ELEMENT_TYPE_CLASS : ptr += CorSigUncompressToken(ptr, &tk); break; case ELEMENT_TYPE_CMOD_REQD : case ELEMENT_TYPE_CMOD_OPT : ptr += CorSigUncompressToken(ptr, &tk); goto AGAIN; case ELEMENT_TYPE_ARRAY : { ptr = skipType(ptr); // element Type unsigned rank = CorSigUncompressData((PCCOR_SIGNATURE&) ptr); if (rank != 0) { unsigned numSizes = CorSigUncompressData((PCCOR_SIGNATURE&) ptr); while(numSizes > 0) { CorSigUncompressData((PCCOR_SIGNATURE&) ptr); --numSizes; } unsigned numLowBounds = CorSigUncompressData((PCCOR_SIGNATURE&) ptr); while(numLowBounds > 0) { CorSigUncompressData((PCCOR_SIGNATURE&) ptr); --numLowBounds; } } } break; // Modifiers or depedant types case ELEMENT_TYPE_PINNED : case ELEMENT_TYPE_PTR : case ELEMENT_TYPE_BYREF : case ELEMENT_TYPE_SZARRAY : // tail recursion optimization // ptr = skipType(ptr, fFixupType); // break goto AGAIN; case ELEMENT_TYPE_VAR: case ELEMENT_TYPE_MVAR: CorSigUncompressData((PCCOR_SIGNATURE&) ptr); // bound break; case ELEMENT_TYPE_FNPTR: { CorSigUncompressData((PCCOR_SIGNATURE&) ptr); // calling convention unsigned argCnt = CorSigUncompressData((PCCOR_SIGNATURE&) ptr); // arg count ptr = skipType(ptr); // return type while(argCnt > 0) { ptr = skipType(ptr); --argCnt; } } break; case ELEMENT_TYPE_GENERICINST: { ptr = skipType(ptr); // type constructor unsigned argCnt = CorSigUncompressData((PCCOR_SIGNATURE&)ptr); // arg count while(argCnt > 0) { ptr = skipType(ptr); --argCnt; } } break; default: case ELEMENT_TYPE_END : _ASSERTE(!"Unknown Type"); break; } return(ptr); } #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable:21000) // Suppress PREFast warning about overly large function #endif BYTE* PrettyPrintCABlobValue(PCCOR_SIGNATURE &typePtr, BYTE* dataPtr, BYTE* dataEnd, CQuickBytes* out, IMDInternalImport *pIMDI, void* GUICookie) { char str[64]; char appendix[64]; int typ; BOOL Reiterate; BOOL CloseParenthesis; unsigned numElements = 1; unsigned n,Lstr; unsigned underType; mdToken tk; appendix[0] = 0; do { if(dataPtr >= dataEnd) { _ASSERTE(!"CA blob too short"); return FALSE; } Reiterate = FALSE; CloseParenthesis = TRUE; switch(typ = *typePtr++) { case ELEMENT_TYPE_VOID : return NULL; case ELEMENT_TYPE_BOOLEAN : appendStr(out,KEYWORD("bool")); appendStr(out,appendix); appendStr(out,"("); for(n=0; n < numElements; n++) { if(n) appendStr(out," "); appendStr(out,(*dataPtr)? 
KEYWORD("true"):KEYWORD("false")); dataPtr++; } break; case ELEMENT_TYPE_CHAR : appendStr(out,KEYWORD("char")); appendStr(out,appendix); appendStr(out,"("); for(n=0; n < numElements; n++) { if(n) appendStr(out," "); sprintf_s(str,64,"0x%4.4X",(WORD)GET_UNALIGNED_VAL16(dataPtr)); appendStr(out,str); dataPtr += 2; } break; case ELEMENT_TYPE_I1 : appendStr(out,KEYWORD("int8")); appendStr(out,appendix); appendStr(out,"("); for(n=0; n < numElements; n++) { if(n) appendStr(out," "); sprintf_s(str,64,"%d",*((char*)dataPtr)); appendStr(out,str); dataPtr ++; } break; case ELEMENT_TYPE_U1 : appendStr(out,KEYWORD("uint8")); appendStr(out,appendix); appendStr(out,"("); for(n=0; n < numElements; n++) { if(n) appendStr(out," "); sprintf_s(str,64,"%d",*dataPtr); appendStr(out,str); dataPtr ++; } break; case ELEMENT_TYPE_I2 : appendStr(out,KEYWORD("int16")); appendStr(out,appendix); appendStr(out,"("); for(n=0; n < numElements; n++) { if(n) appendStr(out," "); sprintf_s(str,64,"%d",GET_UNALIGNED_VAL16(dataPtr)); appendStr(out,str); dataPtr +=2; } break; case ELEMENT_TYPE_U2 : appendStr(out,KEYWORD("uint16")); appendStr(out,appendix); appendStr(out,"("); for(n=0; n < numElements; n++) { if(n) appendStr(out," "); sprintf_s(str,64,"%d",(WORD)GET_UNALIGNED_VAL16(dataPtr)); appendStr(out,str); dataPtr +=2; } break; case ELEMENT_TYPE_I4 : appendStr(out,KEYWORD("int32")); appendStr(out,appendix); appendStr(out,"("); for(n=0; n < numElements; n++) { if(n) appendStr(out," "); sprintf_s(str,64,"%d",GET_UNALIGNED_VAL32(dataPtr)); appendStr(out,str); dataPtr +=4; } break; case ELEMENT_TYPE_U4 : appendStr(out,KEYWORD("uint32")); appendStr(out,appendix); appendStr(out,"("); for(n=0; n < numElements; n++) { if(n) appendStr(out," "); sprintf_s(str,64,"%d",(unsigned)GET_UNALIGNED_VAL32(dataPtr)); appendStr(out,str); dataPtr +=4; } break; case ELEMENT_TYPE_I8 : appendStr(out,KEYWORD("int64")); appendStr(out,appendix); appendStr(out,"("); for(n=0; n < numElements; n++) { if(n) appendStr(out," "); sprintf_s(str,64,"%I64d",GET_UNALIGNED_VAL64(dataPtr)); appendStr(out,str); dataPtr +=8; } break; case ELEMENT_TYPE_U8 : appendStr(out,KEYWORD("uint64")); appendStr(out,appendix); appendStr(out,"("); for(n=0; n < numElements; n++) { if(n) appendStr(out," "); sprintf_s(str,64,"%I64d",(ULONGLONG)GET_UNALIGNED_VAL64(dataPtr)); appendStr(out,str); dataPtr +=8; } break; case ELEMENT_TYPE_R4 : appendStr(out,KEYWORD("float32")); appendStr(out,appendix); appendStr(out,"("); for(n=0; n < numElements; n++) { if(n) appendStr(out," "); _gcvt_s(str,64,*((float*)dataPtr), 8); float df = (float)atof(str); // Must compare as underlying bytes, not floating point otherwise optmizier will // try to enregister and comapre 80-bit precision number with 32-bit precision number!!!! if((*(ULONG*)&df != (ULONG)GET_UNALIGNED_VAL32(dataPtr))||IsSpecialNumber(str)) sprintf_s(str, 64,"0x%08X",(ULONG)GET_UNALIGNED_VAL32(dataPtr)); appendStr(out,str); dataPtr +=4; } break; case ELEMENT_TYPE_R8 : appendStr(out,KEYWORD("float64")); appendStr(out,appendix); appendStr(out,"("); for(n=0; n < numElements; n++) { if(n) appendStr(out," "); char *pch; _gcvt_s(str,64,*((double*)dataPtr), 17); double df = strtod(str, &pch); // Must compare as underlying bytes, not floating point otherwise optmizier will // try to enregister and comapre 80-bit precision number with 64-bit precision number!!!! 
if((*(ULONGLONG*)&df != (ULONGLONG)GET_UNALIGNED_VAL64(dataPtr))||IsSpecialNumber(str)) sprintf_s(str, 64, "0x%I64X",(ULONGLONG)GET_UNALIGNED_VAL64(dataPtr)); appendStr(out,str); dataPtr +=8; } break; case ELEMENT_TYPE_U : case ELEMENT_TYPE_I : return NULL; case ELEMENT_TYPE_OBJECT : case SERIALIZATION_TYPE_TAGGED_OBJECT: appendStr(out,KEYWORD("object")); appendStr(out,appendix); appendStr(out,"("); for(n=0; n < numElements; n++) { BYTE* dataPtr1 = skipType(dataPtr); if(n) appendStr(out," "); dataPtr = PrettyPrintCABlobValue((PCCOR_SIGNATURE&)dataPtr, dataPtr1, dataEnd, out, pIMDI,GUICookie); if (dataPtr == NULL) return NULL; } break; case ELEMENT_TYPE_STRING : appendStr(out,KEYWORD("string")); appendStr(out,appendix); appendStr(out,"("); for(n=0; n < numElements; n++) { if(n) appendStr(out," "); if(*dataPtr == 0xFF) { appendStr(out,KEYWORD("nullref")); Lstr = 1; } else { appendStr(out,"'"); Lstr = CorSigUncompressData((PCCOR_SIGNATURE&)dataPtr); if(dataPtr + Lstr > dataEnd) return NULL; appendStr(out,UnquotedProperName((char*)dataPtr,Lstr)); appendStr(out,"'"); } dataPtr += Lstr; } break; case ELEMENT_TYPE_CLASS : typePtr += CorSigUncompressToken(typePtr, &tk); //skip the following token FALLTHROUGH; case SERIALIZATION_TYPE_TYPE : appendStr(out,KEYWORD("type")); appendStr(out,appendix); appendStr(out,"("); for(n=0; n < numElements; n++) { if(n) appendStr(out," "); if(*dataPtr == 0xFF) { appendStr(out,KEYWORD("nullref")); Lstr = 1; } else { Lstr = CorSigUncompressData((PCCOR_SIGNATURE&)dataPtr); if(dataPtr + Lstr > dataEnd) return NULL; tk = ResolveReflectionNotation(dataPtr,Lstr,pIMDI,GUICookie); if(IsNilToken(tk)) { appendStr(out,KEYWORD("class ")); appendStr(out,"'"); appendStr(out,UnquotedProperName((char*)dataPtr,Lstr)); appendStr(out,"'"); } else { PrettyPrintClass(out, tk, pIMDI); } } dataPtr += Lstr; } break; case ELEMENT_TYPE_VALUETYPE : typePtr += CorSigUncompressToken(typePtr, &tk); _ASSERTE(pIMDI->IsValidToken(tk)); goto GetUTSize; case SERIALIZATION_TYPE_ENUM : Lstr = CorSigUncompressData((PCCOR_SIGNATURE&)typePtr); tk = ResolveReflectionNotation((BYTE*)typePtr,Lstr,pIMDI,GUICookie); /* if(IsNilToken(tk)) { _ASSERTE(!"Failed to resolve Reflection notation for S_T_ENUM"); return NULL; } */ typePtr += Lstr; GetUTSize: underType = UnderlyingTypeOfEnum(tk, pIMDI); if(underType == 0) { // try to figure out the underlying type by its size switch(dataEnd - dataPtr) { case 1: // bool underType = ELEMENT_TYPE_BOOLEAN; break; case 2: // int16 underType = ELEMENT_TYPE_I2; break; case 4: // int32 underType = ELEMENT_TYPE_I4; break; case 8: // int64 underType = ELEMENT_TYPE_I8; break; default: return NULL; } //_ASSERTE(!"Failed to find underlying type for S_T_ENUM"); } { PCCOR_SIGNATURE ps = (PCCOR_SIGNATURE)&underType; dataPtr = PrettyPrintCABlobValue(ps, dataPtr, dataEnd, out, pIMDI,GUICookie); } CloseParenthesis = FALSE; break; case ELEMENT_TYPE_SZARRAY : numElements *= (unsigned)GET_UNALIGNED_VAL32(dataPtr); Reiterate = TRUE; sprintf_s(appendix,64,"[%d]",numElements); if(numElements == 0xFFFFFFFF) numElements = 0; dataPtr += 4; break; case ELEMENT_TYPE_ARRAY : case ELEMENT_TYPE_VAR : case ELEMENT_TYPE_MVAR : case ELEMENT_TYPE_FNPTR : case ELEMENT_TYPE_GENERICINST : case ELEMENT_TYPE_TYPEDBYREF : #ifdef LOGGING case ELEMENT_TYPE_INTERNAL : #endif // LOGGING return NULL; // Modifiers or depedent types case ELEMENT_TYPE_CMOD_OPT : case ELEMENT_TYPE_CMOD_REQD : case ELEMENT_TYPE_PINNED : Reiterate = TRUE; break; case ELEMENT_TYPE_PTR : case ELEMENT_TYPE_BYREF : return NULL; default: case 
ELEMENT_TYPE_SENTINEL : case ELEMENT_TYPE_END : _ASSERTE(!"Unknown Type"); return NULL; } // end switch } while(Reiterate); if(CloseParenthesis) appendStr(out,")"); return dataPtr; } #ifdef _PREFAST_ #pragma warning(pop) #endif BOOL PrettyPrintCustomAttributeNVPairs(unsigned nPairs, BYTE* dataPtr, BYTE* dataEnd, CQuickBytes* out, void* GUICookie) { IMDInternalImport *pIMDI = g_pImport; // ptr to IMDInternalImport class with ComSig while(dataPtr < dataEnd) { // field or property? switch(*dataPtr) { case SERIALIZATION_TYPE_FIELD: appendStr(out,KEYWORD("field ")); break; case SERIALIZATION_TYPE_PROPERTY: appendStr(out,KEYWORD("property ")); break; default: _ASSERTE(!"Invalid code of name/val pair in CA blob"); return FALSE; } dataPtr++; if(dataPtr >= dataEnd) { _ASSERTE(!"CA blob too short"); return FALSE; } // type of the field/property PCCOR_SIGNATURE dataTypePtr = (PCCOR_SIGNATURE)dataPtr; const char* szAppend = ""; if(*dataPtr == ELEMENT_TYPE_SZARRAY) // Only SZARRAY modifier can occur in ser.type { szAppend = "[]"; dataPtr++; } if(*dataPtr == SERIALIZATION_TYPE_TYPE) { appendStr(out,KEYWORD("type")); dataPtr++; } else if(*dataPtr == SERIALIZATION_TYPE_TAGGED_OBJECT) { appendStr(out,KEYWORD("object")); dataPtr++; } else if(*dataPtr == SERIALIZATION_TYPE_ENUM) { appendStr(out,KEYWORD("enum ")); dataPtr++; unsigned Lstr = CorSigUncompressData((PCCOR_SIGNATURE&)dataPtr); if(dataPtr + Lstr > dataEnd) return FALSE; mdToken tk = ResolveReflectionNotation(dataPtr,Lstr,pIMDI,GUICookie); if(IsNilToken(tk)) { appendStr(out,KEYWORD("class ")); appendStr(out,"'"); appendStr(out,UnquotedProperName((char*)dataPtr,Lstr)); appendStr(out,"'"); } else { PrettyPrintClass(out, tk, pIMDI); } dataPtr += Lstr; } else { szAppend = ""; dataPtr = (BYTE*)PrettyPrintType(dataTypePtr, out, pIMDI); } if(*szAppend != 0) appendStr(out,szAppend); if(dataPtr >= dataEnd) { _ASSERTE(!"CA blob too short"); return FALSE; } // name of the field/property unsigned Lstr = CorSigUncompressData((PCCOR_SIGNATURE&)dataPtr); if(dataPtr + Lstr > dataEnd) return FALSE; appendStr(out," '"); appendStr(out,UnquotedProperName((char*)dataPtr,Lstr)); appendStr(out,"' = "); dataPtr += Lstr; if(dataPtr >= dataEnd) { _ASSERTE(!"CA blob too short"); return FALSE; } // value of the field/property dataPtr = PrettyPrintCABlobValue(dataTypePtr, dataPtr, dataEnd, out, pIMDI,GUICookie); if(NULL == dataPtr) return FALSE; appendStr(out,"\n"); nPairs--; } _ASSERTE(nPairs == 0); return TRUE; } BOOL PrettyPrintCustomAttributeBlob(mdToken tkType, BYTE* pBlob, ULONG ulLen, void* GUICookie, __inout __nullterminated char* szString) { char* initszptr = szString + strlen(szString); PCCOR_SIGNATURE typePtr; // type to convert, ULONG typeLen; // the lenght of 'typePtr' CHECK_LOCAL_STATIC_VAR(static CQuickBytes out); // where to put the pretty printed string IMDInternalImport *pIMDI = g_pImport; // ptr to IMDInternalImport class with ComSig unsigned numArgs = 0; unsigned numTyArgs = 0; PCCOR_SIGNATURE typeEnd; unsigned callConv; BYTE* dataPtr = pBlob; BYTE* dataEnd = dataPtr + ulLen; WORD wNumNVPairs = 0; unsigned numElements = 0; if(TypeFromToken(tkType) == mdtMemberRef) { const char *szName_Ignore; if (FAILED(pIMDI->GetNameAndSigOfMemberRef(tkType,&typePtr,&typeLen, &szName_Ignore))) { return FALSE; } } else if(TypeFromToken(tkType) == mdtMethodDef) { if (FAILED(pIMDI->GetSigOfMethodDef(tkType, &typeLen, &typePtr))) { return FALSE; } } else return FALSE; typeEnd = typePtr + typeLen; callConv = CorSigUncompressData(typePtr); if (callConv & 
IMAGE_CEE_CS_CALLCONV_GENERIC) { numTyArgs = CorSigUncompressData(typePtr); return FALSE; // leave generic instantiations for later } numElements = numArgs = CorSigUncompressData(typePtr); out.Shrink(0); if (!isCallConv(callConv, IMAGE_CEE_CS_CALLCONV_GENERICINST)) { // skip return type typePtr = PrettyPrintType(typePtr, &out, pIMDI); out.Shrink(0); } appendStr(&out," = {"); dataPtr += 2; // skip blob prolog 0x0001 // dump the arguments while(typePtr < typeEnd) { if (*typePtr == ELEMENT_TYPE_SENTINEL) { typePtr++; } else { if (numArgs <= 0) break; dataPtr = PrettyPrintCABlobValue(typePtr, dataPtr, dataEnd-2, &out, pIMDI,GUICookie); if(NULL == dataPtr) return FALSE; appendStr(&out,"\n"); --numArgs; } } _ASSERTE(numArgs == 0); wNumNVPairs = (WORD)GET_UNALIGNED_VAL16(dataPtr); dataPtr+=2; numElements += wNumNVPairs; // arguments done, now to field/property name-val pairs if(!PrettyPrintCustomAttributeNVPairs((unsigned) wNumNVPairs, dataPtr, dataEnd, &out, GUICookie)) return FALSE; { char* sz = asString(&out); char* ch = sz; char* szbl; while((ch = strchr(ch,'\n'))) { *ch = 0; ch++; } // if the string is too long already, begin on next line if((initszptr - szString) > 80) { printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"%s ",g_szAsmCodeIndent); initszptr = &szString[strlen(szString)]; } sprintf_s(initszptr,SZSTRING_REMAINING_SIZE(initszptr), "%s", sz); initszptr += 4; // to compensate for " = {" szbl = szString + strlen(g_szAsmCodeIndent); for(unsigned n = 1; n < numElements; n++) { printLine(GUICookie, szString); sz = sz + strlen(sz) + 1; for(ch = szbl; ch < initszptr; ch++) *ch = ' '; sprintf_s(initszptr,SZSTRING_REMAINING_SIZE(initszptr), "%s", sz); } } strcat_s(initszptr, SZSTRING_REMAINING_SIZE(initszptr),"}"); if(g_fShowBytes) { printLine(GUICookie,szString); strcat_s(g_szAsmCodeIndent,MAX_MEMBER_LENGTH," // "); sprintf_s(szString,SZSTRING_SIZE,"%s = ( ",g_szAsmCodeIndent); DumpByteArray(szString,pBlob,ulLen,GUICookie); g_szAsmCodeIndent[strlen(g_szAsmCodeIndent)-8] = 0; } return TRUE; } void DumpCustomAttributeProps(mdToken tkCA, mdToken tkType, mdToken tkOwner, BYTE* pBlob, ULONG ulLen, void *GUICookie, bool bWithOwner) { char* szptr = &szString[0]; BOOL fCommentItOut = FALSE; if((TypeFromToken(tkType) == mdtMemberRef)||(TypeFromToken(tkType) == mdtMethodDef)) { mdToken tkParent; const char * pszClassName = NULL; const char * pszNamespace = NULL; if (TypeFromToken(tkType) == mdtMemberRef) { if (FAILED(g_pImport->GetParentOfMemberRef(tkType, &tkParent))) { szptr += sprintf_s(szptr, SZSTRING_REMAINING_SIZE(szptr), "Invalid MemberRef %08X record ", tkType); return; } } else { if (FAILED(g_pImport->GetParentToken(tkType, &tkParent))) { szptr += sprintf_s(szptr, SZSTRING_REMAINING_SIZE(szptr), "Invalid token %08X ", tkType); return; } } REGISTER_REF(tkOwner,tkType); // owner of the CA references the class amd method REGISTER_REF(tkOwner,tkParent); if (TypeFromToken(tkParent) == mdtTypeDef) { if (FAILED(g_pImport->GetNameOfTypeDef(tkParent, &pszClassName, &pszNamespace))) { szptr += sprintf_s(szptr, SZSTRING_REMAINING_SIZE(szptr), "Invalid TypeDef %08X record ", tkParent); return; } } else if (TypeFromToken(tkParent) == mdtTypeRef) { if (FAILED(g_pImport->GetNameOfTypeRef(tkParent, &pszNamespace, &pszClassName))) { szptr += sprintf_s(szptr, SZSTRING_REMAINING_SIZE(szptr), "Invalid TypeRef %08X record ", tkParent); return; } } if(pszClassName && pszNamespace && (strcmp(pszNamespace,"System.Diagnostics") == 0) && (strcmp(pszClassName,"DebuggableAttribute") == 0)) 
fCommentItOut = TRUE; } if(fCommentItOut) { printLine(GUICookie,COMMENT((char*)0)); // start multiline comment sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_AUTOCA),g_szAsmCodeIndent); printLine(GUICookie, szString); strcat_s(g_szAsmCodeIndent,MAX_MEMBER_LENGTH,"// "); } szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,KEYWORD(".custom")); if(bWithOwner) { if(g_fDumpTokens) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("/*%08X*/ "),tkCA); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"("); switch(TypeFromToken(tkOwner)) { case mdtTypeDef : case mdtTypeRef : case mdtTypeSpec: PrettyPrintToken(szString, tkOwner, g_pImport,GUICookie,0); break; case mdtMemberRef: { PCCOR_SIGNATURE typePtr; const char* pszMemberName; ULONG cComSig; if (FAILED(g_pImport->GetNameAndSigOfMemberRef( tkOwner, &typePtr, &cComSig, &pszMemberName))) { szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"ERROR "); break; } unsigned callConv = CorSigUncompressData(typePtr); if (isCallConv(callConv, IMAGE_CEE_CS_CALLCONV_FIELD)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("field ")); else szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("method ")); PrettyPrintToken(szString, tkOwner, g_pImport,GUICookie,0); } break; case mdtMethodDef: szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr), KEYWORD("method ")); PrettyPrintToken(szString, tkOwner, g_pImport,GUICookie,0); break; default : strcat_s(szptr, SZSTRING_REMAINING_SIZE(szptr),ERRORMSG("UNKNOWN_OWNER")); break; } szptr = &szString[strlen(szString)]; if(g_fDumpTokens) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("/*%08X*/ "),tkOwner); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),") "); } else { if(g_fDumpTokens) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("/*%08X:%08X*/ "),tkCA,tkType); } switch(TypeFromToken(tkType)) { case mdtTypeDef : case mdtTypeRef : case mdtMemberRef: case mdtMethodDef: PrettyPrintToken(szString, tkType, g_pImport,GUICookie,0); break; default : strcat_s(szString, SZSTRING_SIZE,ERRORMSG("UNNAMED_CUSTOM_ATTR")); break; } szptr = &szString[strlen(szString)]; if(pBlob && ulLen) { if(!g_fCAVerbal || !PrettyPrintCustomAttributeBlob(tkType, pBlob, ulLen, GUICookie, szString)) { sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = ( "); DumpByteArray(szString,pBlob,ulLen,GUICookie); } } printLine(GUICookie, szString); if(fCommentItOut) { g_szAsmCodeIndent[strlen(g_szAsmCodeIndent)-4] = 0; printLine(GUICookie,COMMENT((char*)-1)); // end multiline comment } } void DumpCustomAttribute(mdCustomAttribute tkCA, void *GUICookie, bool bWithOwner) { mdToken tkType; BYTE* pBlob=NULL; ULONG ulLen=0; mdToken tkOwner; static mdToken tkMod = 0xFFFFFFFF; _ASSERTE((TypeFromToken(tkCA)==mdtCustomAttribute)&&(RidFromToken(tkCA)>0)); _ASSERTE(RidFromToken(tkCA) <= g_uNCA); if(tkMod == 0xFFFFFFFF) tkMod = g_pImport->GetModuleFromScope(); // can't use InternalImport here: need the tkOwner if (FAILED(g_pPubImport->GetCustomAttributeProps( // S_OK or error. tkCA, // [IN] CustomValue token. &tkOwner, // [OUT, OPTIONAL] Object token. &tkType, // [OUT, OPTIONAL] Put TypeDef/TypeRef token here. (const void **)&pBlob, // [OUT, OPTIONAL] Put pointer to data here. &ulLen))) // [OUT, OPTIONAL] Put size of date here. 
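    // The custom attribute record could not be read; there is nothing to dump.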
{ return; } if(!RidFromToken(tkOwner)) return; DWORD i; for(i = 0; i < g_NumTypedefs; i++) { TypeDefDescr* pTDD = &((*g_typedefs)[i]); if(TypeFromToken(pTDD->tkTypeSpec) == mdtCustomAttribute) { mdToken tkTypeTD; mdToken tkOwnerTD; BYTE* pBlobTD=NULL; ULONG uLenTD=0; tkTypeTD = GET_UNALIGNED_VAL32(pTDD->psig); if(tkTypeTD != tkType) continue; tkOwnerTD = GET_UNALIGNED_VAL32(pTDD->psig + sizeof(mdToken)); if(pTDD->cb > 2*sizeof(mdToken)) { pBlobTD = (BYTE*)pTDD->psig + 2*sizeof(mdToken); uLenTD = pTDD->cb - 2*sizeof(mdToken); } if(uLenTD != ulLen) continue; if(memcmp(pBlobTD,pBlob,ulLen) != 0) continue; char* szptr = &szString[0]; szString[0] = 0; szptr += sprintf_s(szString,SZSTRING_SIZE,"%s%s", g_szAsmCodeIndent,JUMPPT(ProperName(pTDD->szName),pTDD->tkSelf)); if(g_fDumpTokens) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("/*%08X*/ "),tkCA); printLine(GUICookie,szString); break; } } if(i >= g_NumTypedefs) DumpCustomAttributeProps(tkCA,tkType,tkOwner,pBlob,ulLen,GUICookie,bWithOwner); _ASSERTE(g_rchCA); _ASSERTE(RidFromToken(tkCA) <= g_uNCA); g_rchCA[RidFromToken(tkCA)] = 1; } void DumpCustomAttributes(mdToken tkOwner, void *GUICookie) { if (g_fShowCA) { HENUMInternal hEnum; mdCustomAttribute tkCA; if (FAILED(g_pImport->EnumInit(mdtCustomAttribute, tkOwner,&hEnum))) { return; } while(g_pImport->EnumNext(&hEnum,&tkCA) && RidFromToken(tkCA)) { DumpCustomAttribute(tkCA,GUICookie,false); } g_pImport->EnumClose( &hEnum); } } void DumpDefaultValue(mdToken tok, __inout __nullterminated char* szString, void* GUICookie) { MDDefaultValue MDDV; char* szptr = &szString[strlen(szString)]; if (FAILED(g_pImport->GetDefaultValue(tok, &MDDV))) { szptr += sprintf_s(szptr, SZSTRING_REMAINING_SIZE(szptr), ERRORMSG(" /* Invalid default value for %08X: */"), tok); return; } switch(MDDV.m_bType) { case ELEMENT_TYPE_VOID: strcat_s(szString, SZSTRING_SIZE," /* NO CORRESPONDING RECORD IN CONSTANTS TABLE */"); break; case ELEMENT_TYPE_I1: szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = %s(0x%02X)",KEYWORD("int8"),MDDV.m_byteValue); break; case ELEMENT_TYPE_U1: szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = %s(0x%02X)",KEYWORD("uint8"),MDDV.m_byteValue); break; case ELEMENT_TYPE_I2: szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = %s(0x%04X)",KEYWORD("int16"),MDDV.m_usValue); break; case ELEMENT_TYPE_U2: szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = %s(0x%04X)",KEYWORD("uint16"),MDDV.m_usValue); break; case ELEMENT_TYPE_I4: szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = %s(0x%08X)",KEYWORD("int32"),MDDV.m_ulValue); break; case ELEMENT_TYPE_U4: szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = %s(0x%08X)",KEYWORD("uint32"),MDDV.m_ulValue); break; case ELEMENT_TYPE_CHAR: szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = %s(0x%04X)",KEYWORD("char"),MDDV.m_usValue); break; case ELEMENT_TYPE_BOOLEAN: szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = %s",KEYWORD("bool")); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"(%s)", KEYWORD((char *)(MDDV.m_byteValue ? 
"true" : "false"))); break; case ELEMENT_TYPE_I8: szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = %s(0x%I64X)",KEYWORD("int64"),MDDV.m_ullValue); break; case ELEMENT_TYPE_U8: szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = %s(0x%I64X)",KEYWORD("uint64"),MDDV.m_ullValue); break; case ELEMENT_TYPE_R4: { char szf[32]; _gcvt_s(szf,32,MDDV.m_fltValue, 8); float df = (float)atof(szf); // Must compare as underlying bytes, not floating point otherwise optmizier will // try to enregister and comapre 80-bit precision number with 32-bit precision number!!!! if((*(ULONG*)&df == MDDV.m_ulValue)&&!IsSpecialNumber(szf)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = %s(%s)",KEYWORD("float32"),szf); else szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr), " = %s(0x%08X)",KEYWORD("float32"),MDDV.m_ulValue); } break; case ELEMENT_TYPE_R8: { char szf[32], *pch; _gcvt_s(szf,32,MDDV.m_dblValue, 17); double df = strtod(szf, &pch); //atof(szf); szf[31]=0; // Must compare as underlying bytes, not floating point otherwise optmizier will // try to enregister and comapre 80-bit precision number with 64-bit precision number!!!! if((*(ULONGLONG*)&df == MDDV.m_ullValue)&&!IsSpecialNumber(szf)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = %s(%s)",KEYWORD("float64"),szf); else szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr), " = %s(0x%I64X) // %s",KEYWORD("float64"),MDDV.m_ullValue,szf); } break; case ELEMENT_TYPE_STRING: szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = "); PAL_CPP_TRY { szptr = DumpUnicodeString(GUICookie,szString,(WCHAR*)MDDV.m_wzValue,MDDV.m_cbSize/sizeof(WCHAR)); } PAL_CPP_CATCH_ALL { strcat_s(szString, SZSTRING_SIZE,ERRORMSG("INVALID DATA ADDRESS")); } PAL_CPP_ENDTRY; break; case ELEMENT_TYPE_CLASS: if(MDDV.m_wzValue==NULL) { szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = %s",KEYWORD("nullref")); break; } //else fall thru to default case, to report the error FALLTHROUGH; default: szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),ERRORMSG(" /* ILLEGAL CONSTANT type:0x%02X, size:%d bytes, blob: "),MDDV.m_bType,MDDV.m_cbSize); if(MDDV.m_wzValue) { szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"("); PAL_CPP_TRY { DumpByteArray(szString,(BYTE*)MDDV.m_wzValue,MDDV.m_cbSize,GUICookie); } PAL_CPP_CATCH_ALL { szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),ERRORMSG(" Invalid blob at 0x%08X)"), MDDV.m_wzValue); } PAL_CPP_ENDTRY } else { szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"NULL"); } strcat_s(szString, SZSTRING_REMAINING_SIZE(szptr), " */"); break; } } void DumpParams(ParamDescriptor* pPD, ULONG ulParams, void* GUICookie) { if(pPD) { for(ULONG i = ulParams; i<2*ulParams+1; i++) // pPD[ulParams] is return value { ULONG j = i % (ulParams+1); if(RidFromToken(pPD[j].tok)) { HENUMInternal hEnum; mdCustomAttribute tkCA; ULONG ulCAs= 0; if(g_fShowCA) { if (FAILED(g_pImport->EnumInit(mdtCustomAttribute, pPD[j].tok, &hEnum))) { sprintf_s(szString, SZSTRING_SIZE, "%sERROR: MetaData error enumerating CustomAttribute for %08X", g_szAsmCodeIndent, pPD[j].tok); printLine(GUICookie, szString); continue; } ulCAs = g_pImport->EnumGetCount(&hEnum); } if(ulCAs || IsPdHasDefault(pPD[j].attr)) { char *szptr = &szString[0]; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s [%d]",g_szAsmCodeIndent,KEYWORD(".param"),i-ulParams); if(g_fDumpTokens) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("/*%08X*/ "),pPD[j].tok); if(IsPdHasDefault(pPD[j].attr)) DumpDefaultValue(pPD[j].tok, szString, GUICookie); 
printLine(GUICookie, szString); if(ulCAs) { while(g_pImport->EnumNext(&hEnum,&tkCA) && RidFromToken(tkCA)) { DumpCustomAttribute(tkCA,GUICookie,false); } } } if(g_fShowCA) g_pImport->EnumClose( &hEnum); } } } } BOOL DumpPermissionSetBlob(void* GUICookie,__inout __nullterminated char* szString, BYTE* pvPermission, ULONG cbPermission) { if(*pvPermission == '.') { CQuickBytes out; pvPermission++; char* szptr_init = &szString[strlen(szString)]; char* szptr = szptr_init; appendStr(&out," = {"); unsigned nAttrs = CorSigUncompressData((PCCOR_SIGNATURE&)pvPermission); for(unsigned iAttr = 0; iAttr < nAttrs; iAttr++) { unsigned L = CorSigUncompressData((PCCOR_SIGNATURE&)pvPermission); // class name length mdToken tkAttr = ResolveReflectionNotation(pvPermission,L,g_pImport,GUICookie); if(IsNilToken(tkAttr)) { appendStr(&out,KEYWORD("class ")); appendStr(&out,"'"); appendStr(&out,UnquotedProperName((char*)pvPermission,L)); appendStr(&out,"'"); } else { PrettyPrintClass(&out, tkAttr, g_pImport); } pvPermission += L; appendStr(&out," = {"); // dump blob L = CorSigUncompressData((PCCOR_SIGNATURE&)pvPermission); // blob length if(L > 0) { BYTE* pvEnd = pvPermission+L; L = CorSigUncompressData((PCCOR_SIGNATURE&)pvPermission); // number of props if(L > 0) { if(!PrettyPrintCustomAttributeNVPairs(L, pvPermission, pvEnd, &out, GUICookie)) return FALSE; out.Shrink(out.Size()-1); } pvPermission = pvEnd; } appendStr(&out, iAttr == nAttrs-1 ? "}" : "}, "); } appendStr(&out, "}"); char* sz = asString(&out); while(char* pc = strstr(sz,"}, ")) { *(pc+2) = 0; strcpy_s(szptr,SZSTRING_REMAINING_SIZE(szptr), sz); printLine(GUICookie,szString); sz = pc+3; if(szptr == szptr_init) szptr += 4; // to compensate for = { for(pc = szString; pc < szptr; pc++) *pc = ' '; } strcpy_s(szptr, SZSTRING_REMAINING_SIZE(szptr),sz); return TRUE; } return FALSE; } void DumpPermissions(mdToken tkOwner, void* GUICookie) { HCORENUM hEnum = NULL; static mdPermission rPerm[16384]; ULONG count; HRESULT hr; //static char szString[4096]; // can't use internal import here: EnumInit not impl. 
    // for mdtPermission
    while (SUCCEEDED(hr = g_pPubImport->EnumPermissionSets( &hEnum,
                     tkOwner, 0, rPerm, 16384, &count)) && count > 0)
    {
        for (ULONG i = 0; i < count; i++)
        {
            DWORD dwAction;
            const BYTE *pvPermission=NULL;
            ULONG cbPermission=0;
            const char *szAction;
            char *szptr;
            szptr = &szString[0];
            if(SUCCEEDED(hr = g_pPubImport->GetPermissionSetProps( rPerm[i], &dwAction,
                (const void**)&pvPermission, &cbPermission)))
            {
                szptr += sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,KEYWORD(".permissionset"));
                switch(dwAction)
                {
                    case dclActionNil:          szAction = "";                              break;
                    case dclRequest:            szAction = KEYWORD("request");              break;
                    case dclDemand:             szAction = KEYWORD("demand");               break;
                    case dclAssert:             szAction = KEYWORD("assert");               break;
                    case dclDeny:               szAction = KEYWORD("deny");                 break;
                    case dclPermitOnly:         szAction = KEYWORD("permitonly");           break;
                    case dclLinktimeCheck:      szAction = KEYWORD("linkcheck");            break;
                    case dclInheritanceCheck:   szAction = KEYWORD("inheritcheck");         break;
                    case dclRequestMinimum:     szAction = KEYWORD("reqmin");               break;
                    case dclRequestOptional:    szAction = KEYWORD("reqopt");               break;
                    case dclRequestRefuse:      szAction = KEYWORD("reqrefuse");            break;
                    case dclPrejitGrant:        szAction = KEYWORD("prejitgrant");          break;
                    case dclPrejitDenied:       szAction = KEYWORD("prejitdeny");           break;
                    case dclNonCasDemand:       szAction = KEYWORD("noncasdemand");         break;
                    case dclNonCasLinkDemand:   szAction = KEYWORD("noncaslinkdemand");     break;
                    case dclNonCasInheritance:  szAction = KEYWORD("noncasinheritance");    break;
                    default:                    szAction = ERRORMSG("<UNKNOWN_ACTION>");    break;
                }
                szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),szAction);
                if(pvPermission && cbPermission)
                {
                    printLine(GUICookie, szString);
                    sprintf_s(szString,SZSTRING_SIZE,"%s ",g_szAsmCodeIndent);
                    if(!DumpPermissionSetBlob(GUICookie,szString,(BYTE*)pvPermission,cbPermission))
                    {
                        strcat_s(szString,SZSTRING_SIZE,KEYWORD("bytearray"));
                        strcat_s(szString,SZSTRING_SIZE," (");
                        DumpByteArray(szString, pvPermission, cbPermission, GUICookie);
                    }
                    printLine(GUICookie,szString);
                }
                else // i.e. if pvPermission == NULL or cbPermission == NULL
                {
                    sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," = ()");
                    printLine(GUICookie,szString);
                }
                DumpCustomAttributes(rPerm[i],GUICookie);
            }// end if(GetPermissionProps)
        } // end for(all permissions)
    }//end while(EnumPermissionSets)
    g_pPubImport->CloseEnum( hEnum);
}

void PrettyPrintMethodSig(__inout __nullterminated char* szString, unsigned* puStringLen, CQuickBytes* pqbMemberSig,
                          PCCOR_SIGNATURE pComSig, ULONG cComSig, __inout __nullterminated char* buff,
                          _In_opt_z_ char* szArgPrefix, void* GUICookie)
{
    unsigned uMaxWidth = 40;
    if(g_fDumpHTML || g_fDumpRTF) uMaxWidth = 240;
    if(*buff && (strlen(szString) > (size_t)uMaxWidth))
    {
        printLine(GUICookie,szString);
        strcpy_s(szString,SZSTRING_SIZE,g_szAsmCodeIndent);
        strcat_s(szString,SZSTRING_SIZE," "); // to align with ".method "
    }
    appendStr(pqbMemberSig, szString);
    {
        char* pszTailSig = (char*)PrettyPrintSig(pComSig, cComSig, buff, pqbMemberSig, g_pImport, szArgPrefix);
        if(*buff)
        {
            size_t L = strlen(pszTailSig);
            char* newbuff = new char[strlen(buff)+3];
            sprintf_s(newbuff,strlen(buff)+3," %s(", buff);
            char* pszOffset = strstr(pszTailSig,newbuff);
            if(pszOffset)
            {
                char* pszTailSigRemainder = new char[L+1];
                if(pszOffset - pszTailSig > (int)uMaxWidth)
                {
                    char* pszOffset2 = strstr(pszTailSig," marshal(");
                    if(pszOffset2 && (pszOffset2 < pszOffset))
                    {
                        *pszOffset2 = 0;
                        strcpy_s(pszTailSigRemainder,L,pszOffset2+1);
                        printLine(GUICookie,pszTailSig);
                        strcpy_s(pszTailSig,L,g_szAsmCodeIndent);
                        strcat_s(pszTailSig,L," "); // to align with ".method "
                        strcat_s(pszTailSig,L,pszTailSigRemainder);
                        pszOffset = strstr(pszTailSig,newbuff);
                    }
                    *pszOffset = 0 ;
                    strcpy_s(pszTailSigRemainder,L,pszOffset+1);
                    printLine(GUICookie,pszTailSig);
                    strcpy_s(pszTailSig,L,g_szAsmCodeIndent);
                    strcat_s(pszTailSig,L," "); // to align with ".method "
                    strcat_s(pszTailSig,L,pszTailSigRemainder);
                    pszOffset = strstr(pszTailSig,newbuff);
                }
                size_t i, j, k, l, indent = pszOffset - pszTailSig + strlen(buff) + 2;
                char chAfterComma;
                char *pComma = pszTailSig+strlen(buff), *pch;
                while((pComma = strchr(pComma,',')))
                {
                    for(pch = pszTailSig, i=0, j = 0, k=0, l=0; pch < pComma; pch++)
                    {
                        if(*pch == '\\') pch++;
                        else
                        {
                            if(*pch == '\'') j=1-j;
                            else if(*pch == '\"') k=1-k;
                            else if(j==0)
                            {
                                if(*pch == '[') i++;
                                else if(*pch == ']') i--;
                                else if(strncmp(pch,LTN(),strlen(LTN()))==0) l++;
                                else if(strncmp(pch,GTN(),strlen(GTN()))==0) l--;
                            }
                        }
                    }
                    pComma++;
                    if((i==0)&&(j==0)&&(k==0)&&(l==0))// no brackets/quotes or all opened/closed
                    {
                        chAfterComma = *pComma;
                        strcpy_s(pszTailSigRemainder,L,pComma);
                        *pComma = 0;
                        printLine(GUICookie,pszTailSig);
                        *pComma = chAfterComma;
                        for(i=0; i<indent; i++) pszTailSig[i] = ' ';
                        strcpy_s(&pszTailSig[indent],L-indent,pszTailSigRemainder);
                        pComma = pszTailSig;
                    }
                }
                if(*puStringLen < (unsigned)strlen(pszTailSig)+128)
                {
                    //free(szString);
                    *puStringLen = (unsigned)strlen(pszTailSig)+128; // need additional space for "il managed" etc.
//szString = (char*)malloc(*puStringLen); } VDELETE(pszTailSigRemainder); } strcpy_s(szString,SZSTRING_SIZE,pszTailSig); VDELETE(newbuff); } else // it's for GUI, don't split it into several lines { size_t L = strlen(szString); if(L < 2048) { L = 2048-L; strncpy_s(szString,SZSTRING_SIZE,pszTailSig,L); } } } } // helper to avoid mixing of SEH and stack objects with destructors BOOL DisassembleWrapper(IMDInternalImport *pImport, BYTE *ILHeader, void *GUICookie, mdToken FuncToken, ParamDescriptor* pszArgname, ULONG ulArgs) { BOOL fRet = FALSE; //char szString[4096]; PAL_CPP_TRY { fRet = Disassemble(pImport, ILHeader, GUICookie, FuncToken, pszArgname, ulArgs); } PAL_CPP_CATCH_ALL { sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_DASMERR),g_szAsmCodeIndent); printLine(GUICookie, szString); } PAL_CPP_ENDTRY return fRet; } BOOL PrettyPrintGP( // prints name of generic param, or returns FALSE mdToken tkOwner, // Class, method or 0 CQuickBytes *out, // where to put the pretty printed generic param int n) // Index of generic param { BOOL ret = FALSE; if(tkOwner && ((TypeFromToken(tkOwner)==mdtTypeDef)||(TypeFromToken(tkOwner)==mdtMethodDef))) { DWORD NumTyPars; HENUMInternal hEnumTyPar; if(SUCCEEDED(g_pImport->EnumInit(mdtGenericParam,tkOwner,&hEnumTyPar))) { NumTyPars = g_pImport->EnumGetCount(&hEnumTyPar); if(NumTyPars > (DWORD)n) { // need this for name dup check LPCSTR *pszName = new LPCSTR[NumTyPars]; if(pszName != NULL) { ULONG ulSequence; DWORD ix,nx; mdToken tk; for(ix = 0, nx = 0xFFFFFFFF; ix < NumTyPars; ix++) { if(g_pImport->EnumNext(&hEnumTyPar,&tk)) { if(SUCCEEDED(g_pImport->GetGenericParamProps(tk,&ulSequence,NULL,NULL,NULL,&pszName[ix]))) { if(ulSequence == (ULONG)n) nx = ix; } } } // if there are dup names, use !0 or !!0 if(nx != 0xFFFFFFFF) { for(ix = 0; ix < nx; ix++) { if(strcmp(pszName[ix],pszName[nx]) == 0) break; } if(ix >= nx) { for(ix = nx+1; ix < NumTyPars; ix++) { if(strcmp(pszName[ix],pszName[nx]) == 0) break; } if(ix >= NumTyPars) { appendStr(out, ProperName((char*)(pszName[nx]))); ret = TRUE; } } } // end if(tkTyPar != 0) delete [] pszName; } // end if(pszName != NULL) } // end if(NumTyPars > (DWORD)n) } // end if(SUCCEEDED(g_pImport->EnumInit(mdtGenericParam,tkOwner,&hEnumTyPar))) g_pImport->EnumClose(&hEnumTyPar); } // end if(tkOwner) return ret; } // Pretty-print formal type parameters for a class or method char *DumpGenericPars(_Inout_updates_(SZSTRING_SIZE) char* szString, mdToken tok, void* GUICookie/*=NULL*/, BOOL fSplit/*=FALSE*/) { WCHAR *wzArgName = wzUniBuf; ULONG chName; mdToken tkConstr[2048]; DWORD NumTyPars; DWORD NumConstrs; mdGenericParam tkTyPar; ULONG ulSequence; DWORD attr; mdToken tkOwner; HCORENUM hEnumTyPar = NULL; HCORENUM hEnumTyParConstr = NULL; char* szptr = &szString[strlen(szString)]; char* szbegin; unsigned i; if (FAILED(g_pPubImport->EnumGenericParams(&hEnumTyPar, tok, &tkTyPar, 1, &NumTyPars))) return NULL; if (NumTyPars > 0) { szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),LTN()); szbegin = szptr; for (i = 1; NumTyPars != 0; i++) { g_pPubImport->GetGenericParamProps(tkTyPar, &ulSequence, &attr, &tkOwner, NULL, wzArgName, UNIBUF_SIZE/2, &chName); //if(wcslen(wzArgName) >= MAX_CLASSNAME_LENGTH) // wzArgName[MAX_CLASSNAME_LENGTH-1] = 0; hEnumTyParConstr = NULL; if (FAILED(g_pPubImport->EnumGenericParamConstraints(&hEnumTyParConstr, tkTyPar, tkConstr, 2048, &NumConstrs))) { g_pPubImport->CloseEnum(hEnumTyPar); return NULL; } *szptr = 0; CHECK_REMAINING_SIZE; switch (attr & gpVarianceMask) { case gpCovariant : szptr += 
sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr), "+ "); break; case gpContravariant : szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr), "- "); break; } CHECK_REMAINING_SIZE; if ((attr & gpReferenceTypeConstraint) != 0) szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr), "class "); CHECK_REMAINING_SIZE; if ((attr & gpNotNullableValueTypeConstraint) != 0) szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr), "valuetype "); CHECK_REMAINING_SIZE; if ((attr & gpDefaultConstructorConstraint) != 0) szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr), ".ctor "); CHECK_REMAINING_SIZE; if (NumConstrs) { CQuickBytes out; mdToken tkConstrType,tkOwner; szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"("); DWORD ix; for (ix=0; ix<NumConstrs; ix++) { if (FAILED(g_pPubImport->GetGenericParamConstraintProps(tkConstr[ix], &tkOwner, &tkConstrType))) return NULL; if(ix) szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),", "); CHECK_REMAINING_SIZE; out.Shrink(0); szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s",PrettyPrintClass(&out,tkConstrType,g_pImport)); CHECK_REMAINING_SIZE; } if(ix < NumConstrs) break; szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),") "); CHECK_REMAINING_SIZE; } // re-get name, wzUniBuf may not contain it any more g_pPubImport->GetGenericParamProps(tkTyPar, NULL, &attr, NULL, NULL, wzArgName, UNIBUF_SIZE/2, &chName); //if(wcslen(wzArgName) >= MAX_CLASSNAME_LENGTH) // wzArgName[MAX_CLASSNAME_LENGTH-1] = 0; if (chName) { char* sz = (char*)(&wzUniBuf[UNIBUF_SIZE/2]); WszWideCharToMultiByte(CP_UTF8,0,wzArgName,-1,sz,UNIBUF_SIZE,NULL,NULL); szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s",ProperName(sz)); } CHECK_REMAINING_SIZE; if (FAILED(g_pPubImport->EnumGenericParams(&hEnumTyPar, tok, &tkTyPar, 1, &NumTyPars))) return NULL; if (NumTyPars != 0) { *szptr++ = ','; if(fSplit && (i == 4)) { *szptr = 0; printLine(GUICookie,szString); i = 0; // mind i++ at the end of the loop for(szptr = szString; szptr < szbegin; szptr++) *szptr = ' '; } } } // end for (i = 1; NumTyPars != 0; i++) if(NumTyPars != 0) // all type parameters can't fit in szString, error { strcpy_s(szptr,4,"..."); szptr += 3; } else szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),GTN()); } // end if (NumTyPars > 0) *szptr = 0; if(hEnumTyPar) g_pPubImport->CloseEnum(hEnumTyPar); return szptr; } void DumpGenericParsCA(mdToken tok, void* GUICookie/*=NULL*/) { DWORD NumTyPars; mdGenericParam tkTyPar; HCORENUM hEnumTyPar = NULL; unsigned i; WCHAR *wzArgName = wzUniBuf; ULONG chName; DWORD attr; if(g_fShowCA) { for(i=0; SUCCEEDED(g_pPubImport->EnumGenericParams(&hEnumTyPar, tok, &tkTyPar, 1, &NumTyPars)) &&(NumTyPars > 0); i++) { HENUMInternal hEnum; mdCustomAttribute tkCA; ULONG ulCAs= 0; if (FAILED(g_pImport->EnumInit(mdtCustomAttribute, tkTyPar, &hEnum))) { sprintf_s(szString, SZSTRING_SIZE, "%sERROR: MetaData error enumerating CustomAttribute for %08X", g_szAsmCodeIndent, tkTyPar); printLine(GUICookie, szString); return; } ulCAs = g_pImport->EnumGetCount(&hEnum); if(ulCAs) { char *szptr = &szString[0]; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,KEYWORD(".param type")); if(SUCCEEDED(g_pPubImport->GetGenericParamProps(tkTyPar, NULL, &attr, NULL, NULL, wzArgName, UNIBUF_SIZE/2, &chName)) &&(chName > 0)) { //if(wcslen(wzArgName) >= MAX_CLASSNAME_LENGTH) // wzArgName[MAX_CLASSNAME_LENGTH-1] = 0; char* sz = (char*)(&wzUniBuf[UNIBUF_SIZE/2]); WszWideCharToMultiByte(CP_UTF8,0,wzArgName,-1,sz,UNIBUF_SIZE,NULL,NULL); szptr += 
sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s ",ProperName(sz)); } else szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"[%d] ",i+1); if(g_fDumpTokens) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("/*%08X*/ "),tkTyPar); printLine(GUICookie, szString); strcat_s(g_szAsmCodeIndent, MAX_MEMBER_LENGTH, " "); while(g_pImport->EnumNext(&hEnum,&tkCA) && RidFromToken(tkCA)) { DumpCustomAttribute(tkCA,GUICookie,false); } g_szAsmCodeIndent[strlen(g_szAsmCodeIndent) - 2] = 0; } g_pImport->EnumClose( &hEnum); // mdtCustomAttribute ULONG ulSequence; DWORD attr; mdToken tkOwner; HCORENUM hEnumTyParConstraint; mdToken tkConstraint[2048]; DWORD NumConstraints; g_pPubImport->GetGenericParamProps(tkTyPar, &ulSequence, &attr, &tkOwner, NULL, wzArgName, UNIBUF_SIZE / 2, &chName); hEnumTyParConstraint = NULL; if (FAILED(g_pPubImport->EnumGenericParamConstraints(&hEnumTyParConstraint, tkTyPar, tkConstraint, 2048, &NumConstraints))) { g_pPubImport->CloseEnum(hEnumTyPar); return; } if (NumConstraints > 0) { CQuickBytes out; mdToken tkConstraintType; mdToken tkGenericParam; ULONG ulSequence; for (DWORD ix = 0; ix < NumConstraints; ix++) { mdGenericParamConstraint tkParamConstraint = tkConstraint[ix]; if (FAILED(g_pPubImport->GetGenericParamConstraintProps(tkParamConstraint, &tkGenericParam, &tkConstraintType))) { sprintf_s(szString, SZSTRING_SIZE, "%sERROR: MetaData error in GetGenericParamConstraintProps for %08X", g_szAsmCodeIndent, tkParamConstraint); return; } if (FAILED(g_pImport->EnumInit(mdtCustomAttribute, tkParamConstraint, &hEnum))) { sprintf_s(szString, SZSTRING_SIZE, "%sERROR: MetaData error enumerating CustomAttribute for mdGenericParamConstraint %08X", g_szAsmCodeIndent, tkParamConstraint); printLine(GUICookie, szString); return; } ulCAs = g_pImport->EnumGetCount(&hEnum); if (ulCAs) { char *szptr = &szString[0]; szptr += sprintf_s(szptr, SZSTRING_SIZE, "%s%s ", g_szAsmCodeIndent, KEYWORD(".param constraint")); if (FAILED(g_pPubImport->GetGenericParamProps(tkGenericParam, &ulSequence, &attr, NULL, NULL, wzArgName, UNIBUF_SIZE / 2, &chName))) { sprintf_s(szString, SZSTRING_SIZE, "%sERROR: MetaData error in GetGenericParamProps for %08X", g_szAsmCodeIndent, tkGenericParam); printLine(GUICookie, szString); return; } if (chName > 0) { char* sz = (char*)(&wzUniBuf[UNIBUF_SIZE / 2]); WszWideCharToMultiByte(CP_UTF8, 0, wzArgName, -1, sz, UNIBUF_SIZE, NULL, NULL); szptr += sprintf_s(szptr, SZSTRING_REMAINING_SIZE(szptr), " %s", ProperName(sz)); } else { szptr += sprintf_s(szptr, SZSTRING_REMAINING_SIZE(szptr), " [%d]", ulSequence + 1); } if (g_fDumpTokens) { szptr += sprintf_s(szptr, SZSTRING_REMAINING_SIZE(szptr), COMMENT("/*%08X*/ "), tkGenericParam); } szptr += sprintf_s(szptr, SZSTRING_REMAINING_SIZE(szptr), ", "); out.Shrink(0); szptr += sprintf_s(szptr, SZSTRING_REMAINING_SIZE(szptr), "%s", PrettyPrintClass(&out, tkConstraintType, g_pImport)); printLine(GUICookie, szString); strcat_s(g_szAsmCodeIndent, MAX_MEMBER_LENGTH, " "); while (g_pImport->EnumNext(&hEnum, &tkCA) && RidFromToken(tkCA)) { DumpCustomAttribute(tkCA, GUICookie, false); } g_szAsmCodeIndent[strlen(g_szAsmCodeIndent) - 2] = 0; } g_pImport->EnumClose(&hEnum); // mdtCustomAttribute } } } //end for(i=0;... } //end if(g_fShowCA) } // Sets *pbOverridingTypeSpec to TRUE if we are overriding a method declared by a type spec or // if the method has a signature which does not exactly match between the overrider and overridee. // That case is commonly caused by covariant overrides. 
// In that case the syntax is slightly different (there are additional 'method' keywords). // Refer to Expert .NET 2.0 IL Assembler page 242. void PrettyPrintOverrideDecl(ULONG i, __inout __nullterminated char* szString, void* GUICookie, mdToken tkOverrider, BOOL *pbOverridingTypeSpec) { const char * pszMemberName; mdToken tkDecl,tkDeclParent=0; char szBadToken[256]; char pszTailSigDefault[] = ""; char* pszTailSig = pszTailSigDefault; CQuickBytes qbInstSig; char* szptr = &szString[0]; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,KEYWORD(".override")); tkDecl = (*g_pmi_list)[i].tkDecl; *pbOverridingTypeSpec = FALSE; if(g_pImport->IsValidToken(tkDecl)) { bool needsFullTokenPrint = false; bool hasTkDeclParent = false; // Determine if the decl is a typespec method, in which case the "method" syntax + full token print // must be used to generate the disassembly. if(SUCCEEDED(g_pImport->GetParentToken(tkDecl,&tkDeclParent))) { if(g_pImport->IsValidToken(tkDeclParent)) { if(TypeFromToken(tkDeclParent) == mdtMethodDef) //get the parent's parent { mdTypeRef cr1; if(FAILED(g_pImport->GetParentToken(tkDeclParent,&cr1))) cr1 = mdTypeRefNil; tkDeclParent = cr1; } if(RidFromToken(tkDeclParent)) { if(TypeFromToken(tkDeclParent)==mdtTypeSpec) { needsFullTokenPrint = true; } hasTkDeclParent = true; } } else szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s",ERRORMSG("INVALID OVERRIDDEN METHOD'S PARENT TOKEN")); } // Determine if the sig of the decl does not match the sig of the body // In that case the full "method" syntax must be used if ((TypeFromToken(tkOverrider) == mdtMethodDef) && !needsFullTokenPrint) { PCCOR_SIGNATURE pComSigDecl = NULL; ULONG cComSigDecl = 0; mdToken tkDeclSigTok = tkDecl; bool successfullyGotDeclSig = false; bool successfullyGotBodySig = false; if (TypeFromToken(tkDeclSigTok) == mdtMethodSpec) { mdToken meth=0; if (SUCCEEDED(g_pImport->GetMethodSpecProps(tkDeclSigTok, &meth, NULL, NULL))) { tkDeclSigTok = meth; } } if (TypeFromToken(tkDeclSigTok) == mdtMethodDef) { if (SUCCEEDED(g_pImport->GetSigOfMethodDef(tkDeclSigTok, &cComSigDecl, &pComSigDecl))) { successfullyGotDeclSig = true; } } else if (TypeFromToken(tkDeclSigTok) == mdtMemberRef) { const char *pszMemberNameUnused; if (SUCCEEDED(g_pImport->GetNameAndSigOfMemberRef( tkDeclSigTok, &pComSigDecl, &cComSigDecl, &pszMemberNameUnused))) { successfullyGotDeclSig = true; } } PCCOR_SIGNATURE pComSigBody; ULONG cComSigBody; if (SUCCEEDED(g_pImport->GetSigOfMethodDef(tkOverrider, &cComSigBody, &pComSigBody))) { successfullyGotBodySig = true; } if (successfullyGotDeclSig && successfullyGotBodySig) { if (cComSigBody != cComSigDecl) { needsFullTokenPrint = true; } else if (memcmp(pComSigBody, pComSigDecl, cComSigBody) != 0) { needsFullTokenPrint = true; } // Signature are binary identical, full sig printing not needed } else { szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s",ERRORMSG("INVALID BODY OR DECL SIG")); } } if (needsFullTokenPrint) { // In this case, the shortcut syntax cannot be used, and a full token must be printed. // Print the full token and return. szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr), " %s ",KEYWORD("method")); PrettyPrintToken(szString,tkDecl,g_pImport,GUICookie,tkOverrider); *pbOverridingTypeSpec = TRUE; return; } if (hasTkDeclParent) { // If the tkDeclParent was successfully retrieved during parent discovery print it here. 
PrettyPrintToken(szString, tkDeclParent, g_pImport,GUICookie,tkOverrider); strcat_s(szString, SZSTRING_SIZE,"::"); szptr = &szString[strlen(szString)]; } if(TypeFromToken(tkDecl) == mdtMethodSpec) { mdToken meth=0; PCCOR_SIGNATURE pSig=NULL; ULONG cSig=0; if (FAILED(g_pImport->GetMethodSpecProps(tkDecl, &meth, &pSig, &cSig))) { meth = mdTokenNil; pSig = NULL; cSig = 0; } if (pSig && cSig) { qbInstSig.Shrink(0); pszTailSig = (char*)PrettyPrintSig(pSig, cSig, "", &qbInstSig, g_pImport, NULL); } tkDecl = meth; } if(TypeFromToken(tkDecl) == mdtMethodDef) { if (FAILED(g_pImport->GetNameOfMethodDef(tkDecl, &pszMemberName))) { sprintf_s(szBadToken,256,ERRORMSG("INVALID RECORD: 0x%8.8X"),tkDecl); pszMemberName = (const char *)szBadToken; } } else if(TypeFromToken(tkDecl) == mdtMemberRef) { PCCOR_SIGNATURE pComSig; ULONG cComSig; if (FAILED(g_pImport->GetNameAndSigOfMemberRef( tkDecl, &pComSig, &cComSig, &pszMemberName))) { sprintf_s(szBadToken,256,ERRORMSG("INVALID RECORD: 0x%8.8X"),tkDecl); pszMemberName = (const char *)szBadToken; } } else { sprintf_s(szBadToken,256,ERRORMSG("INVALID TOKEN: 0x%8.8X"),tkDecl); pszMemberName = (const char*)szBadToken; } MAKE_NAME_IF_NONE(pszMemberName,tkDecl); } else { sprintf_s(szBadToken,256,ERRORMSG("INVALID TOKEN: 0x%8.8X"),tkDecl); pszMemberName = (const char*)szBadToken; } szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s%s",ProperName((char*)pszMemberName),pszTailSig); if(g_fDumpTokens) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT(" /*%08X::%08X*/ "),tkDeclParent,(*g_pmi_list)[i].tkDecl); } #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable:21000) // Suppress PREFast warning about overly large function #endif BOOL DumpMethod(mdToken FuncToken, const char *pszClassName, DWORD dwEntryPointToken,void *GUICookie,BOOL DumpBody) { const char *pszMemberName = NULL;//[MAX_MEMBER_LENGTH]; const char *pszMemberSig = NULL; DWORD dwAttrs = 0; DWORD dwImplAttrs; DWORD dwOffset; DWORD dwTargetRVA; CQuickBytes qbMemberSig; PCCOR_SIGNATURE pComSig = NULL; ULONG cComSig; char *buff = NULL;//[MAX_MEMBER_LENGTH]; ParamDescriptor* pszArgname = NULL; ULONG ulArgs=0; unsigned retParamIx = 0; unsigned uStringLen = SZSTRING_SIZE; char szArgPrefix[MAX_PREFIX_SIZE]; char* szptr = NULL; mdToken tkMVarOwner = g_tkMVarOwner; if (FAILED(g_pImport->GetMethodDefProps(FuncToken, &dwAttrs))) { sprintf_s(szString, SZSTRING_SIZE, "%sERROR: MethodDef %08X has wrong record", g_szAsmCodeIndent, FuncToken); printError(GUICookie, ERRORMSG(szString)); return FALSE; } if (g_fLimitedVisibility) { if(g_fHidePub && IsMdPublic(dwAttrs)) return FALSE; if(g_fHidePriv && IsMdPrivate(dwAttrs)) return FALSE; if(g_fHideFam && IsMdFamily(dwAttrs)) return FALSE; if(g_fHideAsm && IsMdAssem(dwAttrs)) return FALSE; if(g_fHideFOA && IsMdFamORAssem(dwAttrs)) return FALSE; if(g_fHideFAA && IsMdFamANDAssem(dwAttrs)) return FALSE; if(g_fHidePrivScope && IsMdPrivateScope(dwAttrs)) return FALSE; } if (FAILED(g_pImport->GetMethodImplProps(FuncToken, &dwOffset, &dwImplAttrs))) { sprintf_s(szString, SZSTRING_SIZE, "%sERROR: Invalid MethodImpl %08X record", g_szAsmCodeIndent, FuncToken); printError(GUICookie, ERRORMSG(szString)); return FALSE; } if (FAILED(g_pImport->GetNameOfMethodDef(FuncToken, &pszMemberName))) { sprintf_s(szString, SZSTRING_SIZE, "%sERROR: MethodDef %08X has wrong record", g_szAsmCodeIndent, FuncToken); printError(GUICookie, ERRORMSG(szString)); return FALSE; } MAKE_NAME_IF_NONE(pszMemberName,FuncToken); if (FAILED(g_pImport->GetSigOfMethodDef(FuncToken, &cComSig, 
&pComSig))) { pComSig = NULL; } if (cComSig == NULL) { sprintf_s(szString, SZSTRING_SIZE, "%sERROR: method '%s' has no signature", g_szAsmCodeIndent, pszMemberName); printError(GUICookie, ERRORMSG(szString)); return FALSE; } bool bRet = FALSE; PAL_CPP_TRY { g_tkMVarOwner = FuncToken; szString[0] = 0; DumpGenericPars(szString,FuncToken); //,NULL,FALSE); pszMemberSig = PrettyPrintSig(pComSig, cComSig, szString, &qbMemberSig, g_pImport,NULL); } PAL_CPP_CATCH_ALL { printError(GUICookie,"INVALID DATA ADDRESS"); bRet = TRUE; } PAL_CPP_ENDTRY; if (bRet) { g_tkMVarOwner = tkMVarOwner; return FALSE; } if (g_Mode == MODE_DUMP_CLASS_METHOD || g_Mode == MODE_DUMP_CLASS_METHOD_SIG) { if (strcmp(pszMemberName, g_pszMethodToDump) != 0) return FALSE; if (g_Mode == MODE_DUMP_CLASS_METHOD_SIG) { // we want plain signature without token values const char *pszPlainSig; if (g_fDumpTokens) { // temporarily disable token dumping g_fDumpTokens = FALSE; PAL_CPP_TRY { CQuickBytes qbTempSig; pszPlainSig = PrettyPrintSig(pComSig, cComSig, "", &qbTempSig, g_pImport, NULL); } PAL_CPP_CATCH_ALL { pszPlainSig = ""; } PAL_CPP_ENDTRY; g_fDumpTokens = TRUE; } else { pszPlainSig = pszMemberSig; } if (strcmp(pszPlainSig, g_pszSigToDump) != 0) return FALSE; } } if(!DumpBody) { printLine(GUICookie,(char*)pszMemberSig); g_tkMVarOwner = tkMVarOwner; return TRUE; } szptr = &szString[0]; szString[0] = 0; if(DumpBody) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s%s ",g_szAsmCodeIndent,ANCHORPT(KEYWORD(".method"),FuncToken)); else szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s ",ANCHORPT(KEYWORD(".method"),FuncToken)); if(g_fDumpTokens) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("/*%08X*/ "),FuncToken); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD((char*)0)); if(IsMdPublic(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"public "); if(IsMdPrivate(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"private "); if(IsMdFamily(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"family "); if(IsMdAssem(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"assembly "); if(IsMdFamANDAssem(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"famandassem "); if(IsMdFamORAssem(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"famorassem "); if(IsMdPrivateScope(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"privatescope "); if(IsMdHideBySig(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"hidebysig "); if(IsMdNewSlot(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"newslot "); if(IsMdSpecialName(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"specialname "); if(IsMdRTSpecialName(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"rtspecialname "); if (IsMdStatic(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"static "); if (IsMdAbstract(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"abstract "); if (dwAttrs & 0x00000200) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"strict "); if (IsMdVirtual(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"virtual "); if (IsMdFinal(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"final "); if (IsMdUnmanagedExport(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"unmanagedexp "); if(IsMdRequireSecObject(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"reqsecobj "); 
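    // End of the method accessibility and contract keywords; the pinvokeimpl clause (if any) follows.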
szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD((char*)-1)); if (IsMdPinvokeImpl(dwAttrs)) { DWORD dwMappingFlags; const char *szImportName; mdModuleRef mrImportDLL; szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s(",KEYWORD("pinvokeimpl")); if(FAILED(g_pImport->GetPinvokeMap(FuncToken,&dwMappingFlags, &szImportName,&mrImportDLL))) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("/* No map */")); else szptr=DumpPinvokeMap(dwMappingFlags, (strcmp(szImportName,pszMemberName)? szImportName : NULL), mrImportDLL,szString,GUICookie); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),") "); } // A little hack to get the formatting we need for Assem. buff = new char[SZSTRING_SIZE]; if(buff==NULL) { printError(GUICookie,"Out of memory"); g_tkMVarOwner = tkMVarOwner; return FALSE; } g_fThisIsInstanceMethod = !IsMdStatic(dwAttrs); { const char *psz = NULL; if(IsMdPrivateScope(dwAttrs)) sprintf_s(buff,SZSTRING_SIZE,"%s$PST%08X", pszMemberName,FuncToken ); else strcpy_s(buff,SZSTRING_SIZE, pszMemberName ); psz = ProperName(buff); if(psz != buff) { strcpy_s(buff,SZSTRING_SIZE,psz); } } DumpGenericPars(buff, FuncToken); //, NULL, FALSE); qbMemberSig.Shrink(0); // Get the argument names, if any strcpy_s(szArgPrefix,MAX_PREFIX_SIZE,(g_fThisIsInstanceMethod ? "A1": "A0")); { PCCOR_SIGNATURE typePtr = pComSig; unsigned ulCallConv = CorSigUncompressData(typePtr); // get the calling convention out of the way if (ulCallConv & IMAGE_CEE_CS_CALLCONV_GENERIC) CorSigUncompressData(typePtr); // get the num of generic args out of the way unsigned numArgs = CorSigUncompressData(typePtr)+1; HENUMInternal hArgEnum; mdParamDef tkArg; if (FAILED(g_pImport->EnumInit(mdtParamDef,FuncToken,&hArgEnum))) { printError(GUICookie, "Invalid MetaDataFormat"); g_tkMVarOwner = tkMVarOwner; return FALSE; } ulArgs = g_pImport->EnumGetCount(&hArgEnum); retParamIx = numArgs-1; if (ulArgs < numArgs) ulArgs = numArgs; if (ulArgs != 0) { pszArgname = new ParamDescriptor[ulArgs+2]; memset(pszArgname,0,(ulArgs+2)*sizeof(ParamDescriptor)); LPCSTR szName; ULONG ulSequence, ix; USHORT wSequence; DWORD dwAttr; ULONG j; for (j=0; g_pImport->EnumNext(&hArgEnum,&tkArg) && RidFromToken(tkArg); j++) { if (FAILED(g_pImport->GetParamDefProps(tkArg, &wSequence, &dwAttr, &szName))) { char sz[256]; sprintf_s(sz, ARRAY_SIZE(sz), RstrUTF(IDS_E_INVALIDRECORD), tkArg); printError(GUICookie, sz); continue; } ulSequence = wSequence; if (ulSequence > ulArgs+1) { char sz[256]; sprintf_s(sz,256,RstrUTF(IDS_E_PARAMSEQNO),j,ulSequence,ulSequence); printError(GUICookie,sz); } else { ix = retParamIx; if (ulSequence != 0) { ix = ulSequence-1; if (*szName != 0) { pszArgname[ix].name = new char[strlen(szName)+1]; strcpy_s(pszArgname[ix].name,strlen(szName)+1,szName); } } pszArgname[ix].attr = dwAttr; pszArgname[ix].tok = tkArg; } }// end for( along the params) for (j=0; j <numArgs; j++) { if(pszArgname[j].name == NULL) // we haven't got the name! { pszArgname[j].name = new char[16]; *pszArgname[j].name = 0; } if(*pszArgname[j].name == 0) // we haven't got the name! { sprintf_s(pszArgname[j].name,16,"A_%d",g_fThisIsInstanceMethod ? 
j+1 : j); } }// end for( along the argnames) sprintf_s(szArgPrefix,MAX_PREFIX_SIZE,"@%Id0",(size_t)pszArgname); } //end if (ulArgs) g_pImport->EnumClose(&hArgEnum); } g_tkRefUser = FuncToken; PrettyPrintMethodSig(szString, &uStringLen, &qbMemberSig, pComSig, cComSig, buff, szArgPrefix, GUICookie); g_tkRefUser = 0; szptr = &szString[strlen(szString)]; szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD((char*)0)); if(IsMiNative(dwImplAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," native"); if(IsMiIL(dwImplAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," cil"); if(IsMiOPTIL(dwImplAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," optil"); if(IsMiRuntime(dwImplAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," runtime"); if(IsMiUnmanaged(dwImplAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," unmanaged"); if(IsMiManaged(dwImplAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," managed"); if(IsMiPreserveSig(dwImplAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," preservesig"); if(IsMiForwardRef(dwImplAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," forwardref"); if(IsMiInternalCall(dwImplAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," internalcall"); if(IsMiSynchronized(dwImplAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," synchronized"); if(IsMiNoInlining(dwImplAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," noinlining"); if(IsMiAggressiveInlining(dwImplAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," aggressiveinlining"); if(IsMiNoOptimization(dwImplAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," nooptimization"); if(IsMiAggressiveOptimization(dwImplAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," aggressiveoptimization"); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD((char*)-1)); printLine(GUICookie, szString); VDELETE(buff); if(!DumpBody) { g_tkMVarOwner = tkMVarOwner; return TRUE; } if(g_fShowBytes) { if (FAILED(g_pImport->GetSigOfMethodDef(FuncToken, &cComSig, &pComSig))) { sprintf_s(szString,SZSTRING_SIZE,"%sERROR: method %08X has wrong record",g_szAsmCodeIndent,FuncToken); printError(GUICookie,ERRORMSG(szString)); return FALSE; } const char* szt = "SIG:"; for(ULONG i=0; i<cComSig;) { szptr = &szString[0]; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s// %s", g_szAsmCodeIndent, szt); while(i<cComSig) { szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," %02X",pComSig[i]); i++; if((i & 0x1F)==0) break; // print only 32 per line } printLine(GUICookie, COMMENT(szString)); szt = " "; } } szptr = &szString[0]; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s", g_szAsmCodeIndent,SCOPE()); printLine(GUICookie, szString); szptr = &szString[0]; strcat_s(g_szAsmCodeIndent,MAX_MEMBER_LENGTH," "); // We have recoreded the entry point token from the CLR Header. Check to see if this // method is the entry point. 
if(FuncToken == static_cast<mdToken>(dwEntryPointToken)) { sprintf_s(szString,SZSTRING_SIZE,"%s%s", g_szAsmCodeIndent,KEYWORD(".entrypoint")); printLine(GUICookie, szString); } DumpCustomAttributes(FuncToken,GUICookie); DumpGenericParsCA(FuncToken,GUICookie); DumpParams(pszArgname, retParamIx, GUICookie); DumpPermissions(FuncToken,GUICookie); // Check if the method represents entry in VTable fixups and in EATable { ULONG j; for(j=0; j<g_nVTableRef; j++) { if((*g_prVTableRef)[j].tkTok == FuncToken) { sprintf_s(szString,SZSTRING_SIZE,"%s%s %d : %d", g_szAsmCodeIndent,KEYWORD(".vtentry"),(*g_prVTableRef)[j].wEntry+1,(*g_prVTableRef)[j].wSlot+1); printLine(GUICookie, szString); break; } } for(j=0; j<g_nEATableRef; j++) { if((*g_prEATableRef)[j].tkTok == FuncToken) { szptr = &szString[0]; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s [%d] ", g_szAsmCodeIndent,KEYWORD(".export"),j+g_nEATableBase); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s %s", KEYWORD("as"), ProperName((*g_prEATableRef)[j].pszName)); printLine(GUICookie, szString); break; } } } // Dump method impls of this method: for(ULONG i = 0; i < g_NumMI; i++) { if((*g_pmi_list)[i].tkBody == FuncToken) { BOOL bOverridingTypeSpec; PrettyPrintOverrideDecl(i,szString,GUICookie,FuncToken,&bOverridingTypeSpec); printLine(GUICookie,szString); } } dwTargetRVA = dwOffset; if (IsMdPinvokeImpl(dwAttrs)) { if(dwOffset) { sprintf_s(szString,SZSTRING_SIZE,"%s// Embedded native code",g_szAsmCodeIndent); printLine(GUICookie, COMMENT(szString)); goto ItsMiNative; } if(g_szAsmCodeIndent[0]) g_szAsmCodeIndent[strlen(g_szAsmCodeIndent)-2] = 0; sprintf_s(szString,SZSTRING_SIZE,"%s%s",g_szAsmCodeIndent,UNSCOPE()); printLine(GUICookie, szString); g_tkMVarOwner = tkMVarOwner; return TRUE; } if(IsMiManaged(dwImplAttrs)) { if(IsMiIL(dwImplAttrs) || IsMiOPTIL(dwImplAttrs)) { if(g_fShowBytes) { sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_METHBEG), g_szAsmCodeIndent,dwTargetRVA); printLine(GUICookie, COMMENT(szString)); } szString[0] = 0; if (dwTargetRVA != 0) { void* newTarget = NULL; if(g_pPELoader->getVAforRVA(dwTargetRVA,&newTarget)) { DisassembleWrapper(g_pImport, (unsigned char*)newTarget, GUICookie, FuncToken,pszArgname, ulArgs); } else { sprintf_s(szString,SZSTRING_SIZE, "INVALID METHOD ADDRESS: 0x%8.8zX (RVA: 0x%8.8X)",(size_t)newTarget,dwTargetRVA); printError(GUICookie,szString); } } } else if(IsMiNative(dwImplAttrs)) { ItsMiNative: sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_DASMNATIVE), g_szAsmCodeIndent); printLine(GUICookie, COMMENT(szString)); sprintf_s(szString,SZSTRING_SIZE,"%s// Managed TargetRVA = 0x%8.8X", g_szAsmCodeIndent, dwTargetRVA); printLine(GUICookie, COMMENT(szString)); } } else if(IsMiUnmanaged(dwImplAttrs)&&IsMiNative(dwImplAttrs)) { _ASSERTE(IsMiNative(dwImplAttrs)); sprintf_s(szString,SZSTRING_SIZE,"%s// Unmanaged TargetRVA = 0x%8.8X", g_szAsmCodeIndent, dwTargetRVA); printLine(GUICookie, COMMENT(szString)); } else if(IsMiRuntime(dwImplAttrs)) { sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_METHODRT), g_szAsmCodeIndent); printLine(GUICookie, COMMENT(szString)); } #ifdef _DEBUG else _ASSERTE(!"Bad dwImplAttrs"); #endif if(g_szAsmCodeIndent[0]) g_szAsmCodeIndent[strlen(g_szAsmCodeIndent)-2] = 0; { szptr = &szString[0]; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,UNSCOPE()); if(pszClassName) { szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("// end of method %s::"), ProperName((char*)pszClassName)); 
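            // Append the member name after the "Class::" portion of the closing comment.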
strcpy_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT(ProperName((char*)pszMemberName))); } else sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("// end of global method %s"), ProperName((char*)pszMemberName)); } printLine(GUICookie, szString); szString[0] = 0; printLine(GUICookie, szString); if(pszArgname) { for(ULONG i=0; i < ulArgs; i++) { if(pszArgname[i].name) VDELETE(pszArgname[i].name); } VDELETE(pszArgname); } g_tkMVarOwner = tkMVarOwner; return TRUE; } #ifdef _PREFAST_ #pragma warning(pop) #endif BOOL DumpField(mdToken FuncToken, const char *pszClassName,void *GUICookie, BOOL DumpBody) { char *pszMemberName = NULL;//[MAX_MEMBER_LENGTH]; DWORD dwAttrs = 0; CQuickBytes qbMemberSig; PCCOR_SIGNATURE pComSig = NULL; ULONG cComSig; const char *szStr = NULL;//[1024]; char* szptr; const char *psz; if (FAILED(g_pImport->GetNameOfFieldDef(FuncToken, &psz))) { char sz[2048]; sprintf_s(sz, 2048, "%sERROR: FieldDef %08X has no signature", g_szAsmCodeIndent, FuncToken); printError(GUICookie, sz); return FALSE; } MAKE_NAME_IF_NONE(psz,FuncToken); if (FAILED(g_pImport->GetFieldDefProps(FuncToken, &dwAttrs))) { char sz[2048]; sprintf_s(sz, 2048, "%sERROR: FieldDef %08X record error", g_szAsmCodeIndent, FuncToken); printError(GUICookie, sz); return FALSE; } if (g_fLimitedVisibility) { if(g_fHidePub && IsFdPublic(dwAttrs)) return FALSE; if(g_fHidePriv && IsFdPrivate(dwAttrs)) return FALSE; if(g_fHideFam && IsFdFamily(dwAttrs)) return FALSE; if(g_fHideAsm && IsFdAssembly(dwAttrs)) return FALSE; if(g_fHideFOA && IsFdFamORAssem(dwAttrs)) return FALSE; if(g_fHideFAA && IsFdFamANDAssem(dwAttrs)) return FALSE; if(g_fHidePrivScope && IsFdPrivateScope(dwAttrs)) return FALSE; } { const char* psz1 = NULL; if(IsFdPrivateScope(dwAttrs)) { pszMemberName = new char[strlen(psz)+15]; sprintf_s(pszMemberName,strlen(psz)+15,"%s$PST%08X", psz,FuncToken ); } else { pszMemberName = new char[strlen(psz)+3]; strcpy_s(pszMemberName, strlen(psz)+3, psz ); } psz1 = ProperName(pszMemberName); VDELETE(pszMemberName); pszMemberName = new char[strlen(psz1)+1]; strcpy_s(pszMemberName,strlen(psz1)+1,psz1); } if (FAILED(g_pImport->GetSigOfFieldDef(FuncToken, &cComSig, &pComSig))) { pComSig = NULL; } if (cComSig == NULL) { char sz[2048]; sprintf_s(sz,2048,"%sERROR: field '%s' has no signature",g_szAsmCodeIndent,pszMemberName); VDELETE(pszMemberName); printError(GUICookie,sz); return FALSE; } g_tkRefUser = FuncToken; bool bRet = FALSE; PAL_CPP_TRY { szStr = PrettyPrintSig(pComSig, cComSig, (DumpBody ? 
pszMemberName : ""), &qbMemberSig, g_pImport,NULL); } PAL_CPP_CATCH_ALL { printError(GUICookie,"INVALID ADDRESS IN FIELD SIGNATURE"); bRet = TRUE; } PAL_CPP_ENDTRY; if (bRet) return FALSE; g_tkRefUser = 0; if (g_Mode == MODE_DUMP_CLASS_METHOD || g_Mode == MODE_DUMP_CLASS_METHOD_SIG) { if (strcmp(pszMemberName, g_pszMethodToDump) != 0) { VDELETE(pszMemberName); return FALSE; } if (g_Mode == MODE_DUMP_CLASS_METHOD_SIG) { // we want plain signature without token values and without the field name BOOL fDumpTokens = g_fDumpTokens; g_fDumpTokens = FALSE; const char *pszPlainSig; PAL_CPP_TRY { CQuickBytes qbTempSig; pszPlainSig = PrettyPrintSig(pComSig, cComSig, "", &qbTempSig, g_pImport, NULL); } PAL_CPP_CATCH_ALL { pszPlainSig = ""; } PAL_CPP_ENDTRY; g_fDumpTokens = fDumpTokens; if (strcmp(pszPlainSig, g_pszSigToDump) != 0) { VDELETE(pszMemberName); return FALSE; } } } VDELETE(pszMemberName); szptr = &szString[0]; if(DumpBody) { szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ", g_szAsmCodeIndent,ANCHORPT(KEYWORD(".field"),FuncToken)); if(g_fDumpTokens) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("/*%08X*/ "),FuncToken); } // put offset (if any) for(ULONG i=0; i < g_cFieldOffsets; i++) { if(g_rFieldOffset[i].ridOfField == FuncToken) { if(g_rFieldOffset[i].ulOffset != 0xFFFFFFFF) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"[%d] ",g_rFieldOffset[i].ulOffset); break; } } szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD((char*)0)); if(IsFdPublic(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"public "); if(IsFdPrivate(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"private "); if(IsFdStatic(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"static "); if(IsFdFamily(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"family "); if(IsFdAssembly(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"assembly "); if(IsFdFamANDAssem(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"famandassem "); if(IsFdFamORAssem(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"famorassem "); if(IsFdPrivateScope(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"privatescope "); if(IsFdInitOnly(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"initonly "); if(IsFdLiteral(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"literal "); if(IsFdNotSerialized(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"notserialized "); if(IsFdSpecialName(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"specialname "); if(IsFdRTSpecialName(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"rtspecialname "); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD((char*)-1)); if (IsFdPinvokeImpl(dwAttrs)) { DWORD dwMappingFlags; const char *szImportName; mdModuleRef mrImportDLL; szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s(",KEYWORD("pinvokeimpl")); if(FAILED(g_pImport->GetPinvokeMap(FuncToken,&dwMappingFlags, &szImportName,&mrImportDLL))) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("/* No map */")); else szptr = DumpPinvokeMap(dwMappingFlags, (strcmp(szImportName,psz)? 
szImportName : NULL), mrImportDLL, szString,GUICookie); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),") "); } szptr = DumpMarshaling(g_pImport,szString,SZSTRING_SIZE,FuncToken); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s",szStr); if (IsFdHasFieldRVA(dwAttrs)) // Do we have an RVA associated with this? { szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr), KEYWORD(" at ")); ULONG fieldRVA; if (SUCCEEDED(g_pImport->GetFieldRVA(FuncToken, &fieldRVA))) { szptr = DumpDataPtr(&szString[strlen(szString)], fieldRVA, SizeOfField(FuncToken,g_pImport)); } else { szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),RstrUTF(IDS_E_NORVA)); } } // dump default value (if any): if(IsFdHasDefault(dwAttrs) && DumpBody) DumpDefaultValue(FuncToken,szString,GUICookie); printLine(GUICookie, szString); if(DumpBody) { DumpCustomAttributes(FuncToken,GUICookie); DumpPermissions(FuncToken,GUICookie); } return TRUE; } BOOL DumpEvent(mdToken FuncToken, const char *pszClassName, DWORD dwClassAttrs, void *GUICookie, BOOL DumpBody) { DWORD dwAttrs; mdToken tkEventType; LPCSTR psz; HENUMInternal hAssoc; ASSOCIATE_RECORD rAssoc[128]; CQuickBytes qbMemberSig; ULONG nAssoc; char* szptr; if (FAILED(g_pImport->GetEventProps(FuncToken,&psz,&dwAttrs,&tkEventType))) { char sz[2048]; sprintf_s(sz, 2048, "%sERROR: Invalid Event %08X record", g_szAsmCodeIndent, FuncToken); printError(GUICookie, sz); return FALSE; } MAKE_NAME_IF_NONE(psz,FuncToken); if (g_Mode == MODE_DUMP_CLASS_METHOD || g_Mode == MODE_DUMP_CLASS_METHOD_SIG) { if (strcmp(psz, g_pszMethodToDump) != 0) return FALSE; } if (FAILED(g_pImport->EnumAssociateInit(FuncToken,&hAssoc))) { char sz[2048]; sprintf_s(sz, 2048, "%sERROR: MetaData error enumerating Associate for %08X", g_szAsmCodeIndent, FuncToken); printError(GUICookie, sz); return FALSE; } if ((nAssoc = hAssoc.m_ulCount)) { memset(rAssoc,0,sizeof(rAssoc)); if (FAILED(g_pImport->GetAllAssociates(&hAssoc,rAssoc,nAssoc))) { char sz[2048]; sprintf_s(sz, 2048, "%sERROR: MetaData error enumerating all Associates", g_szAsmCodeIndent); printError(GUICookie, sz); return FALSE; } if (g_fLimitedVisibility) { unsigned i; for (i=0; i < nAssoc;i++) { if ((TypeFromToken(rAssoc[i].m_memberdef) == mdtMethodDef) && g_pImport->IsValidToken(rAssoc[i].m_memberdef)) { DWORD dwMethodAttrs; if (FAILED(g_pImport->GetMethodDefProps(rAssoc[i].m_memberdef, &dwMethodAttrs))) { continue; } if(g_fHidePub && IsMdPublic(dwMethodAttrs)) continue; if(g_fHidePriv && IsMdPrivate(dwMethodAttrs)) continue; if(g_fHideFam && IsMdFamily(dwMethodAttrs)) continue; if(g_fHideAsm && IsMdAssem(dwMethodAttrs)) continue; if(g_fHideFOA && IsMdFamORAssem(dwMethodAttrs)) continue; if(g_fHideFAA && IsMdFamANDAssem(dwMethodAttrs)) continue; if(g_fHidePrivScope && IsMdPrivateScope(dwMethodAttrs)) continue; break; } } if (i >= nAssoc) return FALSE; } } szptr = &szString[0]; if (DumpBody) { szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ", g_szAsmCodeIndent,KEYWORD(".event")); if(g_fDumpTokens) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("/*%08X*/ "),FuncToken); } else { szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s : ",ProperName((char*)psz)); } if(IsEvSpecialName(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("specialname ")); if(IsEvRTSpecialName(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("rtspecialname ")); if(RidFromToken(tkEventType)&&g_pImport->IsValidToken(tkEventType)) { switch(TypeFromToken(tkEventType)) { case mdtTypeRef: case mdtTypeDef: case mdtTypeSpec: { 
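                // Pretty-print the event type token (TypeDef/TypeRef/TypeSpec).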
PrettyPrintToken(szString, tkEventType, g_pImport,GUICookie,0); szptr = &szString[strlen(szString)]; } break; default: break; } } if(!DumpBody) { printLine(GUICookie,szString); return TRUE; } szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," %s", ProperName((char*)psz)); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"%s%s",g_szAsmCodeIndent,SCOPE()); printLine(GUICookie,szString); strcat_s(g_szAsmCodeIndent,MAX_MEMBER_LENGTH," "); DumpCustomAttributes(FuncToken,GUICookie); DumpPermissions(FuncToken,GUICookie); if(nAssoc) { for(unsigned i=0; i < nAssoc;i++) { mdToken tk = rAssoc[i].m_memberdef; DWORD sem = rAssoc[i].m_dwSemantics; szptr = &szString[0]; if(IsMsAddOn(sem)) szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,KEYWORD(".addon")); else if(IsMsRemoveOn(sem)) szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,KEYWORD(".removeon")); else if(IsMsFire(sem)) szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,KEYWORD(".fire")); else if(IsMsOther(sem)) szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,KEYWORD(".other")); else szptr+=sprintf_s(szptr,SZSTRING_SIZE,ERRORMSG("UNKNOWN SEMANTICS: 0x%X "),sem); if(g_pImport->IsValidToken(tk)) PrettyPrintToken(szString, tk, g_pImport,GUICookie,0); else szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),ERRORMSG("INVALID TOKEN 0x%8.8X"),tk); printLine(GUICookie,szString); } } if(g_szAsmCodeIndent[0]) g_szAsmCodeIndent[strlen(g_szAsmCodeIndent)-2] = 0; szptr = &szString[0]; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,UNSCOPE()); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("// end of event %s::"),ProperName((char*)pszClassName)); strcpy_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT(ProperName((char*)psz))); printLine(GUICookie,szString); return TRUE; } BOOL DumpProp(mdToken FuncToken, const char *pszClassName, DWORD dwClassAttrs, void *GUICookie, BOOL DumpBody) { DWORD dwAttrs; LPCSTR psz; HENUMInternal hAssoc; ASSOCIATE_RECORD rAssoc[128]; CQuickBytes qbMemberSig; PCCOR_SIGNATURE pComSig; ULONG cComSig, nAssoc; unsigned uStringLen = SZSTRING_SIZE; char* szptr; if (FAILED(g_pImport->GetPropertyProps(FuncToken,&psz,&dwAttrs,&pComSig,&cComSig))) { char sz[2048]; sprintf_s(sz, 2048, "%sERROR: Invalid Property %08X record", g_szAsmCodeIndent, FuncToken); printError(GUICookie, sz); return FALSE; } MAKE_NAME_IF_NONE(psz,FuncToken); if(cComSig == 0) { char sz[2048]; sprintf_s(sz,2048,"%sERROR: property '%s' has no signature",g_szAsmCodeIndent,psz); printError(GUICookie,sz); return FALSE; } if (g_Mode == MODE_DUMP_CLASS_METHOD || g_Mode == MODE_DUMP_CLASS_METHOD_SIG) { if (strcmp(psz, g_pszMethodToDump) != 0) return FALSE; } if (FAILED(g_pImport->EnumAssociateInit(FuncToken,&hAssoc))) { char sz[2048]; sprintf_s(sz, 2048, "%sERROR: MetaData error enumerating Associate for %08X", g_szAsmCodeIndent, FuncToken); printError(GUICookie, sz); return FALSE; } if ((nAssoc = hAssoc.m_ulCount) != 0) { memset(rAssoc,0,sizeof(rAssoc)); if (FAILED(g_pImport->GetAllAssociates(&hAssoc,rAssoc,nAssoc))) { char sz[2048]; sprintf_s(sz, 2048, "%sERROR: MetaData error enumerating all Associates", g_szAsmCodeIndent); printError(GUICookie, sz); return FALSE; } if (g_fLimitedVisibility) { unsigned i; for (i=0; i < nAssoc;i++) { if ((TypeFromToken(rAssoc[i].m_memberdef) == mdtMethodDef) && g_pImport->IsValidToken(rAssoc[i].m_memberdef)) { DWORD dwMethodAttrs; if (FAILED(g_pImport->GetMethodDefProps(rAssoc[i].m_memberdef, &dwMethodAttrs))) { continue; } if(g_fHidePub && 
IsMdPublic(dwMethodAttrs)) continue; if(g_fHidePriv && IsMdPrivate(dwMethodAttrs)) continue; if(g_fHideFam && IsMdFamily(dwMethodAttrs)) continue; if(g_fHideAsm && IsMdAssem(dwMethodAttrs)) continue; if(g_fHideFOA && IsMdFamORAssem(dwMethodAttrs)) continue; if(g_fHideFAA && IsMdFamANDAssem(dwMethodAttrs)) continue; if(g_fHidePrivScope && IsMdPrivateScope(dwMethodAttrs)) continue; break; } } if( i >= nAssoc) return FALSE; } } szptr = &szString[0]; if (DumpBody) { szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ", g_szAsmCodeIndent,KEYWORD(".property")); if(g_fDumpTokens) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("/*%08X*/ "),FuncToken); } else { szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s : ",ProperName((char*)psz)); } if(IsPrSpecialName(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("specialname ")); if(IsPrRTSpecialName(dwAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("rtspecialname ")); { char pchDefault[] = ""; char *pch = pchDefault; if(DumpBody) { pch = szptr+1; strcpy_s(pch,SZSTRING_REMAINING_SIZE(pch),ProperName((char*)psz)); } qbMemberSig.Shrink(0); PrettyPrintMethodSig(szString, &uStringLen, &qbMemberSig, pComSig, cComSig, pch, NULL, GUICookie); if(IsPrHasDefault(dwAttrs) && DumpBody) DumpDefaultValue(FuncToken,szString,GUICookie); } printLine(GUICookie,szString); if(DumpBody) { sprintf_s(szString,SZSTRING_SIZE,"%s%s",g_szAsmCodeIndent,SCOPE()); printLine(GUICookie,szString); strcat_s(g_szAsmCodeIndent,MAX_MEMBER_LENGTH," "); DumpCustomAttributes(FuncToken,GUICookie); DumpPermissions(FuncToken,GUICookie); if(nAssoc) { for(unsigned i=0; i < nAssoc;i++) { mdToken tk = rAssoc[i].m_memberdef; DWORD sem = rAssoc[i].m_dwSemantics; szptr = &szString[0]; if(IsMsSetter(sem)) szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,KEYWORD(".set")); else if(IsMsGetter(sem)) szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,KEYWORD(".get")); else if(IsMsOther(sem)) szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,KEYWORD(".other")); else szptr+=sprintf_s(szptr,SZSTRING_SIZE,ERRORMSG("UNKNOWN SEMANTICS: 0x%X "),sem); if(g_pImport->IsValidToken(tk)) PrettyPrintToken(szString, tk, g_pImport,GUICookie,0); else szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),ERRORMSG("INVALID TOKEN 0x%8.8X"),tk); printLine(GUICookie,szString); } } if(g_szAsmCodeIndent[0]) g_szAsmCodeIndent[strlen(g_szAsmCodeIndent)-2] = 0; szptr = &szString[0]; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,UNSCOPE()); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("// end of property %s::"),ProperName((char*)pszClassName)); strcpy_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT(ProperName((char*)psz))); printLine(GUICookie,szString); } // end if(DumpBody) return TRUE; } BOOL DumpMembers(mdTypeDef cl, const char *pszClassNamespace, const char *pszClassName, DWORD dwClassAttrs, DWORD dwEntryPointToken, void* GUICookie) { HRESULT hr; mdToken *pMemberList = NULL; DWORD NumMembers, NumFields,NumMethods,NumEvents,NumProps; DWORD i; HENUMInternal hEnumMethod; HENUMInternal hEnumField; HENUMInternal hEnumEvent; HENUMInternal hEnumProp; CQuickBytes qbMemberSig; BOOL ret; // Get the total count of methods + fields hr = g_pImport->EnumInit(mdtMethodDef, cl, &hEnumMethod); if (FAILED(hr)) { FailedToEnum: printLine(GUICookie,RstrUTF(IDS_E_MEMBRENUM)); ret = FALSE; goto CloseHandlesAndReturn; } NumMembers = NumMethods = g_pImport->EnumGetCount(&hEnumMethod); if (FAILED(g_pImport->EnumInit(mdtFieldDef, cl, 
&hEnumField))) goto FailedToEnum; NumFields = g_pImport->EnumGetCount(&hEnumField); NumMembers += NumFields; if (FAILED(g_pImport->EnumInit(mdtEvent, cl, &hEnumEvent))) goto FailedToEnum; NumEvents = g_pImport->EnumGetCount(&hEnumEvent); NumMembers += NumEvents; if (FAILED(g_pImport->EnumInit(mdtProperty, cl, &hEnumProp))) goto FailedToEnum; NumProps = g_pImport->EnumGetCount(&hEnumProp); NumMembers += NumProps; ret = TRUE; if(NumMembers) { pMemberList = new (nothrow) mdToken[NumMembers]; if(pMemberList == NULL) ret = FALSE; } if ((NumMembers == 0)||(pMemberList == NULL)) goto CloseHandlesAndReturn; for (i = 0; g_pImport->EnumNext(&hEnumField, &pMemberList[i]); i++); for (; g_pImport->EnumNext(&hEnumMethod, &pMemberList[i]); i++); for (; g_pImport->EnumNext(&hEnumEvent, &pMemberList[i]); i++); for (; g_pImport->EnumNext(&hEnumProp, &pMemberList[i]); i++); _ASSERTE(i == NumMembers); for (i = 0; i < NumMembers; i++) { mdToken tk = pMemberList[i]; if(g_pImport->IsValidToken(tk)) { switch (TypeFromToken(tk)) { case mdtFieldDef: ret = DumpField(pMemberList[i], pszClassName, GUICookie,TRUE); break; case mdtMethodDef: ret = DumpMethod(pMemberList[i], pszClassName, dwEntryPointToken,GUICookie,TRUE); break; case mdtEvent: ret = DumpEvent(pMemberList[i], pszClassName, dwClassAttrs,GUICookie,TRUE); break; case mdtProperty: ret = DumpProp(pMemberList[i], pszClassName, dwClassAttrs,GUICookie,TRUE); break; default: { char szStr[4096]; sprintf_s(szStr,4096,RstrUTF(IDS_E_ODDMEMBER),pMemberList[i],pszClassName); printLine(GUICookie,szStr); } ret = FALSE; break; } // end switch } else { char szStr[256]; sprintf_s(szStr,256,ERRORMSG("INVALID MEMBER TOKEN: 0x%8.8X"),tk); printLine(GUICookie,szStr); ret= FALSE; } if(ret && (g_Mode == MODE_DUMP_CLASS_METHOD_SIG)) break; } // end for ret = TRUE; CloseHandlesAndReturn: g_pImport->EnumClose(&hEnumMethod); g_pImport->EnumClose(&hEnumField); g_pImport->EnumClose(&hEnumEvent); g_pImport->EnumClose(&hEnumProp); if(pMemberList) delete[] pMemberList; return ret; } BOOL GetClassLayout(mdTypeDef cl, ULONG* pulPackSize, ULONG* pulClassSize) { // Dump class layout HENUMInternal hEnumField; BOOL ret = FALSE; if(g_rFieldOffset) VDELETE(g_rFieldOffset); g_cFieldOffsets = 0; g_cFieldsMax = 0; if(RidFromToken(cl)==0) return TRUE; if (SUCCEEDED(g_pImport->EnumInit(mdtFieldDef, cl, &hEnumField))) { g_cFieldsMax = g_pImport->EnumGetCount(&hEnumField); g_pImport->EnumClose(&hEnumField); } if(SUCCEEDED(g_pImport->GetClassPackSize(cl,pulPackSize))) ret = TRUE; else *pulPackSize = 0xFFFFFFFF; if(SUCCEEDED(g_pImport->GetClassTotalSize(cl,pulClassSize))) ret = TRUE; else *pulClassSize = 0xFFFFFFFF; if(g_cFieldsMax) { MD_CLASS_LAYOUT Layout; if(SUCCEEDED(g_pImport->GetClassLayoutInit(cl,&Layout))) { g_rFieldOffset = new COR_FIELD_OFFSET[g_cFieldsMax+1]; if(g_rFieldOffset) { COR_FIELD_OFFSET* pFO = g_rFieldOffset; for(g_cFieldOffsets=0; SUCCEEDED(g_pImport->GetClassLayoutNext(&Layout,&(pFO->ridOfField),(ULONG*)&(pFO->ulOffset))) &&RidFromToken(pFO->ridOfField); g_cFieldOffsets++, pFO++) ret = TRUE; } } } return ret; } BOOL IsANestedInB(mdTypeDef A, mdTypeDef B) { DWORD i; for(i = 0; i < g_NumClasses; i++) { if(g_cl_list[i] == A) { A = g_cl_enclosing[i]; if(A == B) return TRUE; if(A == mdTypeDefNil) return FALSE; return IsANestedInB(A,B); } } return FALSE; } mdTypeDef TopEncloser(mdTypeDef A) { DWORD i; for(i = 0; i < g_NumClasses; i++) { if(g_cl_list[i] == A) { if(g_cl_enclosing[i] == mdTypeDefNil) return A; return TopEncloser(g_cl_enclosing[i]); } } return A; } BOOL DumpClass(mdTypeDef 
cl, DWORD dwEntryPointToken, void* GUICookie, ULONG WhatToDump) // WhatToDump: 0-title,flags,extends,implements; // +1-pack,size and custom attrs; // +2-nested classes // +4-members { char *pszClassName; // name associated with this CL char *pszNamespace; const char *pc1,*pc2; DWORD dwClassAttrs; mdTypeRef crExtends; HRESULT hr; mdInterfaceImpl ii; DWORD NumInterfaces; DWORD i; HENUMInternal hEnumII; // enumerator for interface impl //char *szString; char* szptr; mdToken tkVarOwner = g_tkVarOwner; ULONG WhatToDumpOrig = WhatToDump; if (FAILED(g_pImport->GetNameOfTypeDef( cl, &pc1, //&pszClassName, &pc2))) //&pszNamespace { char sz[2048]; sprintf_s(sz, 2048, RstrUTF(IDS_E_INVALIDRECORD), cl); printError(GUICookie, sz); g_tkVarOwner = tkVarOwner; return FALSE; } MAKE_NAME_IF_NONE(pc1,cl); if (g_Mode == MODE_DUMP_CLASS || g_Mode == MODE_DUMP_CLASS_METHOD || g_Mode == MODE_DUMP_CLASS_METHOD_SIG) { if(cl != g_tkClassToDump) { if(IsANestedInB(g_tkClassToDump,cl)) WhatToDump = 2; // nested classes only else return TRUE; } } if (FAILED(g_pImport->GetTypeDefProps( cl, &dwClassAttrs, &crExtends))) { char sz[2048]; sprintf_s(sz, 2048, RstrUTF(IDS_E_INVALIDRECORD), cl); printError(GUICookie, sz); g_tkVarOwner = tkVarOwner; return FALSE; } if(g_fLimitedVisibility) { if(g_fHidePub && (IsTdPublic(dwClassAttrs)||IsTdNestedPublic(dwClassAttrs))) return FALSE; if(g_fHidePriv && (IsTdNotPublic(dwClassAttrs)||IsTdNestedPrivate(dwClassAttrs))) return FALSE; if(g_fHideFam && IsTdNestedFamily(dwClassAttrs)) return FALSE; if(g_fHideAsm && IsTdNestedAssembly(dwClassAttrs)) return FALSE; if(g_fHideFOA && IsTdNestedFamORAssem(dwClassAttrs)) return FALSE; if(g_fHideFAA && IsTdNestedFamANDAssem(dwClassAttrs)) return FALSE; } g_tkVarOwner = cl; pszClassName = (char*)(pc1 ? pc1 : ""); pszNamespace = (char*)(pc2 ? 
pc2 : ""); szptr = &szString[0]; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,KEYWORD(".class")); if(g_fDumpTokens) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("/*%8.8X*/ "),cl); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD((char*)0)); if (IsTdInterface(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"interface "); if (IsTdPublic(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"public "); if (IsTdNotPublic(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"private "); if (IsTdAbstract(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"abstract "); if (IsTdAutoLayout(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"auto "); if (IsTdSequentialLayout(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"sequential "); if (IsTdExplicitLayout(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"explicit "); if (IsTdAnsiClass(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"ansi "); if (IsTdUnicodeClass(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"unicode "); if (IsTdAutoClass(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"autochar "); if (IsTdImport(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"import "); if (IsTdWindowsRuntime(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"windowsruntime "); if (IsTdSerializable(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"serializable "); if (IsTdSealed(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"sealed "); if (IsTdNestedPublic(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"nested public "); if (IsTdNestedPrivate(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"nested private "); if (IsTdNestedFamily(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"nested family "); if (IsTdNestedAssembly(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"nested assembly "); if (IsTdNestedFamANDAssem(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"nested famandassem "); if (IsTdNestedFamORAssem(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"nested famorassem "); if (IsTdBeforeFieldInit(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"beforefieldinit "); if (IsTdSpecialName(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"specialname "); if (IsTdRTSpecialName(dwClassAttrs)) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"rtspecialname "); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD((char*)-1)); if(*pszNamespace != 0) szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s.",ProperName(pszNamespace)); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),WhatToDump > 2 ? 
ANCHORPT(ProperName(pszClassName),cl) : JUMPPT(ProperName(pszClassName),cl)); szptr = DumpGenericPars(szString, cl, GUICookie,TRUE); if (szptr == NULL) { g_tkVarOwner = tkVarOwner; return FALSE; } printLine(GUICookie,szString); if (!IsNilToken(crExtends)) { CQuickBytes out; szptr = szString; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s %s ",g_szAsmCodeIndent,KEYWORD("extends")); if(g_pImport->IsValidToken(crExtends)) PrettyPrintToken(szString, crExtends, g_pImport,GUICookie,cl); else szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),ERRORMSG("INVALID TOKEN: 0x%8.8X"),crExtends); printLine(GUICookie,szString); } hr = g_pImport->EnumInit( mdtInterfaceImpl, cl, &hEnumII); if (FAILED(hr)) { printError(GUICookie,RstrUTF(IDS_E_ENUMINIT)); g_tkVarOwner = tkVarOwner; return FALSE; } NumInterfaces = g_pImport->EnumGetCount(&hEnumII); if (NumInterfaces > 0) { CQuickBytes out; mdTypeRef crInterface; for (i=0; g_pImport->EnumNext(&hEnumII, &ii); i++) { szptr = szString; if(i) szptr+=sprintf_s(szptr,SZSTRING_SIZE, "%s ",g_szAsmCodeIndent); else szptr+=sprintf_s(szptr,SZSTRING_SIZE, "%s %s ",g_szAsmCodeIndent,KEYWORD("implements")); if (FAILED(g_pImport->GetTypeOfInterfaceImpl(ii, &crInterface))) { char sz[2048]; sprintf_s(sz, 2048, RstrUTF(IDS_E_INVALIDRECORD), ii); printError(GUICookie, sz); g_tkVarOwner = tkVarOwner; return FALSE; } if(g_pImport->IsValidToken(crInterface)) PrettyPrintToken(szString, crInterface, g_pImport,GUICookie,cl); else szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),ERRORMSG("INVALID TOKEN: 0x%8.8X"),crInterface); if(i < NumInterfaces-1) strcat_s(szString, SZSTRING_SIZE,","); printLine(GUICookie,szString); out.Shrink(0); } // The assertion will fire if the enumerator is bad _ASSERTE(NumInterfaces == i); g_pImport->EnumClose(&hEnumII); } if(WhatToDump == 0) // 0 = title only { sprintf_s(szString,SZSTRING_SIZE,"%s%s %s",g_szAsmCodeIndent,SCOPE(),UNSCOPE()); printLine(GUICookie,szString); g_tkVarOwner = tkVarOwner; return TRUE; } sprintf_s(szString,SZSTRING_SIZE,"%s%s",g_szAsmCodeIndent,SCOPE()); printLine(GUICookie,szString); strcat_s(g_szAsmCodeIndent,MAX_MEMBER_LENGTH," "); ULONG ulPackSize=0xFFFFFFFF,ulClassSize=0xFFFFFFFF; if(WhatToDump & 1) { if(GetClassLayout(cl,&ulPackSize,&ulClassSize)) { // Dump class layout if(ulPackSize != 0xFFFFFFFF) { sprintf_s(szString,SZSTRING_SIZE,"%s%s %d",g_szAsmCodeIndent,KEYWORD(".pack"),ulPackSize); printLine(GUICookie,szString); } if(ulClassSize != 0xFFFFFFFF) { sprintf_s(szString,SZSTRING_SIZE,"%s%s %d",g_szAsmCodeIndent,KEYWORD(".size"),ulClassSize); printLine(GUICookie,szString); } } DumpCustomAttributes(cl,GUICookie); // Dev11 #10745 // Dump InterfaceImpl custom attributes here if (NumInterfaces > 0 && g_fShowCA) { hr = g_pImport->EnumInit( mdtInterfaceImpl, cl, &hEnumII); if (FAILED(hr)) { printError(GUICookie,RstrUTF(IDS_E_ENUMINIT)); g_tkVarOwner = tkVarOwner; return FALSE; } ASSERT_AND_CHECK(NumInterfaces == g_pImport->EnumGetCount(&hEnumII)); CQuickBytes out; mdTypeRef crInterface; for (i = 0; g_pImport->EnumNext(&hEnumII, &ii); i++) { HENUMInternal hEnum; mdCustomAttribute tkCA; bool fFirst = true; if (FAILED(g_pImport->EnumInit(mdtCustomAttribute, ii,&hEnum))) { return FALSE; } while(g_pImport->EnumNext(&hEnum,&tkCA) && RidFromToken(tkCA)) { if (fFirst) { // Print .interfaceImpl type {type} before the custom attribute list szptr = szString; szptr += sprintf_s(szptr, SZSTRING_SIZE, "%s.%s ", g_szAsmCodeIndent, KEYWORD("interfaceimpl type")); if (FAILED(g_pImport->GetTypeOfInterfaceImpl(ii, &crInterface))) { char sz[2048]; 
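// Malformed InterfaceImpl record: report it, restore the generic-parameter owner and abort the class dump.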
sprintf_s(sz, 2048, RstrUTF(IDS_E_INVALIDRECORD), ii); printError(GUICookie, sz); g_tkVarOwner = tkVarOwner; return FALSE; } if(g_pImport->IsValidToken(crInterface)) PrettyPrintToken(szString, crInterface, g_pImport,GUICookie,cl); else szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),ERRORMSG("INVALID TOKEN: 0x%8.8X"),crInterface); printLine(GUICookie,szString); out.Shrink(0); szptr = szString; fFirst = false; } DumpCustomAttribute(tkCA,GUICookie,false); } g_pImport->EnumClose( &hEnum); } // The assertion will fire if the enumerator is bad _ASSERTE(NumInterfaces == i); g_pImport->EnumClose(&hEnumII); } DumpGenericParsCA(cl,GUICookie); DumpPermissions(cl,GUICookie); } // Dump method impls declared in this class whose implementing methods belong somewhere else: if(WhatToDump & 1) // 1 - dump headers { for(i = 0; i < g_NumMI; i++) { if(((*g_pmi_list)[i].tkClass == cl)&&((*g_pmi_list)[i].tkBodyParent != cl)) { BOOL bOverridingTypeSpec; PrettyPrintOverrideDecl(i,szString,GUICookie,cl,&bOverridingTypeSpec); strcat_s(szString, SZSTRING_SIZE,KEYWORD(" with ")); if (bOverridingTypeSpec) { // If PrettyPrintOverrideDecl printed the 'method' keyword, we need it here as well // to satisfy the following grammar rule (simplified): // _OVERRIDE METHOD_ ... DCOLON methodName ... WITH_ METHOD_ ... DCOLON methodName ... strcat_s(szString, SZSTRING_SIZE,KEYWORD("method ")); } PrettyPrintToken(szString, (*g_pmi_list)[i].tkBody, g_pImport,GUICookie,0); printLine(GUICookie,szString); } } } if(WhatToDump & 2) // nested classes { BOOL fRegetClassLayout=FALSE; DWORD dwMode = g_Mode; if(g_Mode == MODE_DUMP_CLASS) g_Mode = MODE_DUMP_ALL; for(i = 0; i < g_NumClasses; i++) { if(g_cl_enclosing[i] == cl) { DumpClass(g_cl_list[i],dwEntryPointToken,GUICookie,WhatToDumpOrig); fRegetClassLayout = TRUE; } } if(fRegetClassLayout) GetClassLayout(cl,&ulPackSize,&ulClassSize); g_Mode = dwMode; } if(WhatToDump & 4) { DumpMembers(cl, pszNamespace, pszClassName, dwClassAttrs, dwEntryPointToken,GUICookie); } if(g_szAsmCodeIndent[0]) g_szAsmCodeIndent[strlen(g_szAsmCodeIndent)-2] = 0; szptr = szString; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s %s// end of class ",g_szAsmCodeIndent,UNSCOPE(),COMMENT((char*)0)); if(*pszNamespace != 0) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s.",ProperName(pszNamespace)); sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s%s", ProperName(pszClassName),COMMENT((char*)-1)); printLine(GUICookie,szString); printLine(GUICookie,""); g_tkVarOwner = tkVarOwner; return TRUE; } void DumpGlobalMethods(DWORD dwEntryPointToken) { HENUMInternal hEnumMethod; mdToken FuncToken; DWORD i; CQuickBytes qbMemberSig; if (FAILED(g_pImport->EnumGlobalFunctionsInit(&hEnumMethod))) return; for (i = 0; g_pImport->EnumNext(&hEnumMethod, &FuncToken); i++) { if (i == 0) { printLine(g_pFile,""); printLine(g_pFile,COMMENT("// ================== GLOBAL METHODS =========================")); printLine(g_pFile,""); } if(DumpMethod(FuncToken, NULL, dwEntryPointToken, g_pFile, TRUE)&& (g_Mode == MODE_DUMP_CLASS_METHOD || g_Mode == MODE_DUMP_CLASS_METHOD_SIG)) break; } g_pImport->EnumClose(&hEnumMethod); if(i) { printLine(g_pFile,""); printLine(g_pFile,COMMENT("// =============================================================")); printLine(g_pFile,""); } } void DumpGlobalFields() { HENUMInternal hEnum; mdToken FieldToken; DWORD i; CQuickBytes qbMemberSig; if (FAILED(g_pImport->EnumGlobalFieldsInit(&hEnum))) return; for (i = 0; g_pImport->EnumNext(&hEnum, &FieldToken); i++) { if (i == 0) { printLine(g_pFile,""); 
printLine(g_pFile,COMMENT("// ================== GLOBAL FIELDS ==========================")); printLine(g_pFile,""); } if(DumpField(FieldToken, NULL, g_pFile, TRUE)&& (g_Mode == MODE_DUMP_CLASS_METHOD || g_Mode == MODE_DUMP_CLASS_METHOD_SIG)) break; } g_pImport->EnumClose(&hEnum); if(i) { printLine(g_pFile,""); printLine(g_pFile,COMMENT("// =============================================================")); printLine(g_pFile,""); } } void DumpVTables(IMAGE_COR20_HEADER *CORHeader, void* GUICookie) { IMAGE_COR_VTABLEFIXUP *pFixup,*pDummy; DWORD iCount; DWORD i; USHORT iSlot; char* szStr = &szString[0]; if (VAL32(CORHeader->VTableFixups.VirtualAddress) == 0) return; sprintf_s(szString,SZSTRING_SIZE,"// VTableFixup Directory:"); printLine(GUICookie,szStr); // Pull back a pointer to it. iCount = VAL32(CORHeader->VTableFixups.Size) / sizeof(IMAGE_COR_VTABLEFIXUP); if ((g_pPELoader->getVAforRVA(VAL32(CORHeader->VTableFixups.VirtualAddress), (void **) &pFixup) == FALSE) ||(g_pPELoader->getVAforRVA(VAL32(CORHeader->VTableFixups.VirtualAddress)+VAL32(CORHeader->VTableFixups.Size)-1, (void **) &pDummy) == FALSE)) { printLine(GUICookie,RstrUTF(IDS_E_VTFUTABLE)); goto exit; } // Walk every v-table fixup entry and dump the slots. for (i=0; i<iCount; i++) { sprintf_s(szString,SZSTRING_SIZE,"// IMAGE_COR_VTABLEFIXUP[%d]:", i); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// RVA: 0x%08x", VAL32(pFixup->RVA)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Count: 0x%04x", VAL16(pFixup->Count)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Type: 0x%04x", VAL16(pFixup->Type)); printLine(GUICookie,szStr); BYTE *pSlot; if (g_pPELoader->getVAforRVA(VAL32(pFixup->RVA), (void **) &pSlot) == FALSE) { printLine(GUICookie,RstrUTF(IDS_E_BOGUSRVA)); goto NextEntry; } for (iSlot=0; iSlot<pFixup->Count; iSlot++) { mdMethodDef tkMethod = VAL32(*(DWORD *) pSlot); if (pFixup->Type & VAL16(COR_VTABLE_32BIT)) { sprintf_s(szString,SZSTRING_SIZE,"// [0x%04x] (0x%08x)", iSlot, tkMethod); pSlot += sizeof(DWORD); } else { sprintf_s(szString,SZSTRING_SIZE,"// [0x%04x] (0x%16llx)", iSlot, VAL64(*(unsigned __int64 *) pSlot)); pSlot += sizeof(unsigned __int64); } printLine(GUICookie,szStr); ValidateToken(tkMethod, mdtMethodDef); } // Pointer to next fixup entry. NextEntry: ++pFixup; } exit: printLine(GUICookie,""); } void DumpEATTable(IMAGE_COR20_HEADER *CORHeader, void* GUICookie) { BYTE *pFixup,*pDummy; DWORD iCount; DWORD BufferRVA; DWORD i; char* szStr = &szString[0]; sprintf_s(szString,SZSTRING_SIZE,"// Export Address Table Jumps:"); printLine(GUICookie,szStr); if (VAL32(CORHeader->ExportAddressTableJumps.VirtualAddress) == 0) { printLine(GUICookie,RstrUTF(IDS_E_NODATA)); return; } // Pull back a pointer to it. iCount = VAL32(CORHeader->ExportAddressTableJumps.Size) / IMAGE_COR_EATJ_THUNK_SIZE; if ((g_pPELoader->getVAforRVA(VAL32(CORHeader->ExportAddressTableJumps.VirtualAddress), (void **) &pFixup) == FALSE) ||(g_pPELoader->getVAforRVA(VAL32(CORHeader->ExportAddressTableJumps.VirtualAddress)+VAL32(CORHeader->ExportAddressTableJumps.Size)-1, (void **) &pDummy) == FALSE)) { printLine(GUICookie,RstrUTF(IDS_E_EATJTABLE)); goto exit; } // Quick sanity check on the linker. if (VAL32(CORHeader->ExportAddressTableJumps.Size) % IMAGE_COR_EATJ_THUNK_SIZE) { sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_EATJSIZE), VAL32(CORHeader->ExportAddressTableJumps.Size), IMAGE_COR_EATJ_THUNK_SIZE); printLine(GUICookie,szStr); } // Walk every v-table fixup entry and dump the slots. 
BufferRVA = VAL32(CORHeader->ExportAddressTableJumps.VirtualAddress); for (i=0; i<iCount; i++) { ULONG ReservedFlag = VAL32(*(ULONG *) (pFixup + sizeof(ULONG))); sprintf_s(szString,SZSTRING_SIZE,"// Fixup Jump Entry [%d], at RVA 0x%08x:", i, BufferRVA); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// RVA of slot: 0x%08x", VAL32(*(ULONG *) pFixup)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Reserved flag: 0x%08x", ReservedFlag); printLine(GUICookie,szStr); if (ReservedFlag != 0) { printLine(GUICookie,RstrUTF(IDS_E_RESFLAGS)); } pFixup += IMAGE_COR_EATJ_THUNK_SIZE; BufferRVA += IMAGE_COR_EATJ_THUNK_SIZE; } exit: printLine(GUICookie,""); } void DumpCodeManager(IMAGE_COR20_HEADER *CORHeader, void* GUICookie) { char* szStr = &szString[0]; sprintf_s(szString,SZSTRING_SIZE,"// Code Manager Table:"); printLine(GUICookie,szStr); if (!VAL32(CORHeader->CodeManagerTable.Size)) { sprintf_s(szString,SZSTRING_SIZE,"// default"); printLine(GUICookie,szStr); return; } const GUID *pcm; if (g_pPELoader->getVAforRVA(VAL32(CORHeader->CodeManagerTable.VirtualAddress), (void **) &pcm) == FALSE) { printLine(GUICookie,RstrUTF(IDS_E_CODEMGRTBL)); return; } sprintf_s(szString,SZSTRING_SIZE,"// [index] ID"); printLine(GUICookie,szStr); ULONG iCount = VAL32(CORHeader->CodeManagerTable.Size) / sizeof(GUID); for (ULONG i=0; i<iCount; i++) { WCHAR rcguid[128]; GUID Guid = *pcm; SwapGuid(&Guid); StringFromGUID2(Guid, rcguid, ARRAY_SIZE(rcguid)); sprintf_s(szString,SZSTRING_SIZE,"// [0x%08x] %S", i, rcguid); printLine(GUICookie,szStr); pcm++; } printLine(GUICookie,""); } void DumpSectionHeaders(IMAGE_SECTION_HEADER* pSH, USHORT nSH, void* GUICookie) { char* szStr = &szString[0]; char name[16]; printLine(GUICookie,""); strcpy_s(szString,SZSTRING_SIZE,"// Image sections:"); printLine(GUICookie,szStr); for(USHORT iSH=0; iSH < nSH; iSH++,pSH++) { strncpy_s(name,16,(const char*)(pSH->Name),8); name[8]=0; sprintf_s(szString,SZSTRING_SIZE,"// %s",name); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Virtual Size", pSH->Misc.VirtualSize); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Virtual Address", pSH->VirtualAddress); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Size of Raw Data", pSH->SizeOfRawData); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Pointer to Raw Data", pSH->PointerToRawData); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Pointer to Relocations", pSH->PointerToRelocations); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Pointer to Linenumbers", pSH->PointerToLinenumbers); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%04x Number of Relocations", pSH->NumberOfRelocations); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%04x Number of Linenumbers", pSH->NumberOfLinenumbers); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Characteristics", pSH->Characteristics); printLine(GUICookie,szStr); if((pSH->Characteristics & IMAGE_SCN_SCALE_INDEX)) { strcpy_s(szString,SZSTRING_SIZE,"// SCALE_INDEX"); printLine(GUICookie,szStr); } if((pSH->Characteristics & IMAGE_SCN_CNT_CODE)) { strcpy_s(szString,SZSTRING_SIZE,"// CNT_CODE"); printLine(GUICookie,szStr); } if((pSH->Characteristics & IMAGE_SCN_CNT_INITIALIZED_DATA)) { strcpy_s(szString,SZSTRING_SIZE,"// CNT_INITIALIZED_DATA"); printLine(GUICookie,szStr); } if((pSH->Characteristics & IMAGE_SCN_CNT_UNINITIALIZED_DATA)) { 
strcpy_s(szString,SZSTRING_SIZE,"// CNT_UNINITIALIZED_DATA"); printLine(GUICookie,szStr); } if((pSH->Characteristics & IMAGE_SCN_NO_DEFER_SPEC_EXC)) { strcpy_s(szString,SZSTRING_SIZE,"// NO_DEFER_SPEC_EXC"); printLine(GUICookie,szStr); } if((pSH->Characteristics & IMAGE_SCN_LNK_NRELOC_OVFL)) { strcpy_s(szString,SZSTRING_SIZE,"// LNK_NRELOC_OVFL"); printLine(GUICookie,szStr); } if((pSH->Characteristics & IMAGE_SCN_MEM_DISCARDABLE)) { strcpy_s(szString,SZSTRING_SIZE,"// MEM_DISCARDABLE"); printLine(GUICookie,szStr); } if((pSH->Characteristics & IMAGE_SCN_MEM_NOT_CACHED)) { strcpy_s(szString,SZSTRING_SIZE,"// MEM_NOT_CACHED"); printLine(GUICookie,szStr); } if((pSH->Characteristics & IMAGE_SCN_MEM_NOT_PAGED)) { strcpy_s(szString,SZSTRING_SIZE,"// MEM_NOT_PAGED"); printLine(GUICookie,szStr); } if((pSH->Characteristics & IMAGE_SCN_MEM_SHARED)) { strcpy_s(szString,SZSTRING_SIZE,"// MEM_SHARED"); printLine(GUICookie,szStr); } if((pSH->Characteristics & IMAGE_SCN_MEM_EXECUTE)) { strcpy_s(szString,SZSTRING_SIZE,"// MEM_EXECUTE"); printLine(GUICookie,szStr); } if((pSH->Characteristics & IMAGE_SCN_MEM_READ)) { strcpy_s(szString,SZSTRING_SIZE,"// MEM_READ"); printLine(GUICookie,szStr); } if((pSH->Characteristics & IMAGE_SCN_MEM_WRITE)) { strcpy_s(szString,SZSTRING_SIZE,"// MEM_WRITE"); printLine(GUICookie,szStr); } printLine(GUICookie,""); } } void DumpBaseReloc(const char *szName, IMAGE_DATA_DIRECTORY *pDir, void* GUICookie) { char* szStr = &szString[0]; sprintf_s(szString,SZSTRING_SIZE,"// %s", szName); printLine(GUICookie,szStr); if (!VAL32(pDir->Size)) { printLine(GUICookie,RstrUTF(IDS_E_NODATA)); return; } char *pBegin, *pEnd; DWORD *pdw, i, Nentries; WORD *pw; if (g_pPELoader->getVAforRVA(VAL32(pDir->VirtualAddress), (void **) &pBegin) == FALSE) { printLine(GUICookie,RstrUTF(IDS_E_IMPORTDATA)); return; } pEnd = pBegin + VAL32(pDir->Size); for(pdw = (DWORD*)pBegin; pdw < (DWORD*)pEnd; ) { sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Page RVA", *pdw); printLine(GUICookie,szStr); pdw++; sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Block Size", *pdw); printLine(GUICookie,szStr); Nentries = (*pdw - 2*sizeof(DWORD)) / sizeof(WORD); pdw++; sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Number of Entries", Nentries); printLine(GUICookie,szStr); for(i = 1, pw = (WORD*)pdw; i <= Nentries; i++, pw++) { sprintf_s(szString,SZSTRING_SIZE,"// Entry %d: Type 0x%x Offset 0x%08x", i, ((*pw)>>12), ((*pw)&0x0FFF)); printLine(GUICookie,szStr); } if((Nentries & 1)) pw++; // to make pdw DWORD-aligned pdw = (DWORD*)pw; printLine(GUICookie,""); } } void DumpIAT(const char *szName, IMAGE_DATA_DIRECTORY *pDir, void* GUICookie) { char* szStr = &szString[0]; sprintf_s(szString,SZSTRING_SIZE,"// %s", szName); printLine(GUICookie,szStr); if (!VAL32(pDir->Size)) { printLine(GUICookie,RstrUTF(IDS_E_NODATA)); return; } const char *szDLLName; const IMAGE_IMPORT_DESCRIPTOR *pImportDesc; if (g_pPELoader->getVAforRVA(VAL32(pDir->VirtualAddress), (void **) &pImportDesc) == FALSE) { printLine(GUICookie,RstrUTF(IDS_E_IMPORTDATA)); return; } const DWORD *pImportTableID; while (VAL32(pImportDesc->FirstThunk)) { if (g_pPELoader->getVAforRVA(VAL32(pImportDesc->Name), (void **) &szDLLName) == FALSE || g_pPELoader->getVAforRVA(VAL32(pImportDesc->FirstThunk), (void **) &pImportTableID) == FALSE) { printLine(GUICookie,RstrUTF(IDS_E_IMPORTDATA)); return; } sprintf_s(szString,SZSTRING_SIZE,"// DLL : %s", szDLLName); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Import Address Table", VAL32(pImportDesc->FirstThunk)); 
printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Import Name Table", VAL32(pImportDesc->Name)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// %-8d Time Date Stamp", VAL32(pImportDesc->TimeDateStamp)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// %-8d Index of First Forwarder Reference", VAL32(pImportDesc->ForwarderChain)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"//"); printLine(GUICookie,szStr); for ( ; VAL32(*pImportTableID); pImportTableID++) { if (VAL32(*pImportTableID) & 0x80000000) sprintf_s(szString,SZSTRING_SIZE,"// by Ordinal %d", VAL32(*pImportTableID) & 0x7fffffff); else { const IMAGE_IMPORT_BY_NAME *pName; if(g_pPELoader->getVAforRVA(VAL32(*pImportTableID) & 0x7fffffff, (void **) &pName)) sprintf_s(szString,SZSTRING_SIZE,"// 0x%04x %s", VAL16(pName->Hint), pName->Name); else sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x bad RVA of IMAGE_IMPORT_BY_NAME", VAL32(*pImportTableID)); } printLine(GUICookie,szStr); } printLine(GUICookie,""); // Next import descriptor. pImportDesc++; } } struct MDStreamHeader { DWORD Reserved; BYTE Major; BYTE Minor; BYTE Heaps; BYTE Rid; ULONGLONG MaskValid; ULONGLONG Sorted; }; void DumpMetadataHeader(const char *szName, IMAGE_DATA_DIRECTORY *pDir, void* GUICookie) { char* szStr = &szString[0]; printLine(GUICookie,""); sprintf_s(szString,SZSTRING_SIZE,"// %s", szName); printLine(GUICookie,szStr); if (!VAL32(pDir->Size)) { printLine(GUICookie,RstrUTF(IDS_E_NODATA)); return; } const STORAGESIGNATURE *pSSig; char verstr[1024]; if (g_pPELoader->getVAforRVA(VAL32(pDir->VirtualAddress), (void **) &pSSig) == FALSE) { printLine(GUICookie,RstrUTF(IDS_E_IMPORTDATA)); return; } strcpy_s(szString,SZSTRING_SIZE,"// Storage Signature:"); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Signature", VAL32(pSSig->lSignature)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%04x Major Version", VAL16(pSSig->iMajorVer)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%04x Minor Version", VAL16(pSSig->iMinorVer)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Extra Data Offset", VAL32(pSSig->iExtraData)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Version String Length", VAL32(pSSig->iVersionString)); printLine(GUICookie,szStr); memset(verstr,0,1024); strncpy_s(verstr,1024,(const char*)(pSSig->pVersion),VAL32(pSSig->iVersionString)); sprintf_s(szString,SZSTRING_SIZE,"// '%s' Version String", verstr); printLine(GUICookie,szStr); size_t pb = (size_t)pSSig; pb += (3*sizeof(DWORD)+2*sizeof(WORD)+VAL32(pSSig->iVersionString)+3)&~3; PSTORAGEHEADER pSHdr = (PSTORAGEHEADER)pb; strcpy_s(szString,SZSTRING_SIZE,"// Storage Header:"); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%02x Flags", pSHdr->fFlags); printLine(GUICookie,szStr); short nStr = VAL16(pSHdr->iStreams); sprintf_s(szString,SZSTRING_SIZE,"// 0x%04x Number of Streams", nStr); if(nStr > 5) { strcat_s(szString, SZSTRING_SIZE, " -- BOGUS!"); nStr = 5; } printLine(GUICookie,szStr); PSTORAGESTREAM pStr = (PSTORAGESTREAM)(pSHdr+1); BYTE* pbMDstream = NULL; size_t cbMDstream = 0; for(short iStr = 1; iStr <= nStr; iStr++) { sprintf_s(szString,SZSTRING_SIZE,"// Stream %d:",iStr); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Offset", VAL32(pStr->iOffset)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Size", VAL32(pStr->iSize)); printLine(GUICookie,szStr); 
sprintf_s(szString,SZSTRING_SIZE,"// '%s' Name", pStr->rcName); printLine(GUICookie,szStr); if((strcmp(pStr->rcName,"#-")==0)||(strcmp(pStr->rcName,"#~")==0)) { pbMDstream = (BYTE*)pSSig + VAL32(pStr->iOffset); cbMDstream = VAL32(pStr->iSize); } pb = (size_t)pStr; pb += (2*sizeof(DWORD)+strlen(pStr->rcName)+1+3)&~3; pStr = (PSTORAGESTREAM)pb; } if((pbMDstream)&&(cbMDstream >= sizeof(MDStreamHeader))) { printLine(GUICookie,""); strcpy_s(szString,SZSTRING_SIZE,"// Metadata Stream Header:"); printLine(GUICookie,szStr); MDStreamHeader* pMDSH = (MDStreamHeader*)pbMDstream; sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x Reserved", VAL32(pMDSH->Reserved)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%02x Major", pMDSH->Major); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%02x Minor", pMDSH->Minor); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%02x Heaps", pMDSH->Heaps); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%02x Rid", pMDSH->Rid); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%016I64x MaskValid", (ULONGLONG)GET_UNALIGNED_VAL64(&(pMDSH->MaskValid))); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// 0x%016I64x Sorted", (ULONGLONG)GET_UNALIGNED_VAL64(&(pMDSH->Sorted))); printLine(GUICookie,szStr); } } void DumpEntryPoint(DWORD dwAddrOfEntryPoint,DWORD dwEntryPointSize,void* GUICookie) { BYTE* pB; char* szStr = &szString[0]; char* szptr = szStr+2; DWORD i; printLine(GUICookie,""); strcpy_s(szString,SZSTRING_SIZE,"// Entry point code:"); printLine(GUICookie,szStr); if (g_pPELoader->getVAforRVA(dwAddrOfEntryPoint, (void **) &pB) == FALSE) { printLine(GUICookie,"Bad RVA of entry point"); return; } if(dwEntryPointSize == 48) pB -= 32; // on IA64, AddressOfEntryPoint points at PLabelDescriptor, not at the stub itself for(i=0; i<dwEntryPointSize; i++) { szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%2.2X ",pB[i]); } printLine(GUICookie,szStr); } #define DUMP_DIRECTORY(szName, Directory) \ sprintf_s(szString,SZSTRING_SIZE,"// 0x%08x [0x%08x] address [size] of " szName, \ VAL32(Directory.VirtualAddress), VAL32(Directory.Size)); \ printLine(GUICookie,szStr) #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable:21000) // Suppress PREFast warning about overly large function #endif void DumpHeader(IMAGE_COR20_HEADER *CORHeader, void* GUICookie) { char* szStr = &szString[0]; DWORD dwAddrOfEntryPoint=0, dwEntryPointSize=0; PIMAGE_DOS_HEADER pDOSHeader = g_pPELoader->dosHeader(); strcpy_s(szString,SZSTRING_SIZE,"// ----- DOS Header:"); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Magic: 0x%04x", VAL16(pDOSHeader->e_magic)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Bytes on last page: 0x%04x", VAL16(pDOSHeader->e_cblp)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Pages in file: 0x%04x", VAL16(pDOSHeader->e_cp)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Relocations: 0x%04x", VAL16(pDOSHeader->e_crlc)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of header (paragraphs):0x%04x", VAL16(pDOSHeader->e_cparhdr)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Min extra paragraphs: 0x%04x", VAL16(pDOSHeader->e_minalloc)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Max extra paragraphs: 0x%04x", VAL16(pDOSHeader->e_maxalloc)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Initial (relative) SS: 0x%04x", 
VAL16(pDOSHeader->e_ss)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Initial SP: 0x%04x", VAL16(pDOSHeader->e_sp)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Checksum: 0x%04x", VAL16(pDOSHeader->e_csum)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Initial IP: 0x%04x", VAL16(pDOSHeader->e_ip)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Initial (relative) CS: 0x%04x", VAL16(pDOSHeader->e_ip)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// File addr. of reloc table: 0x%04x", VAL16(pDOSHeader->e_lfarlc)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Overlay number: 0x%04x", VAL16(pDOSHeader->e_ovno)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// OEM identifier: 0x%04x", VAL16(pDOSHeader->e_oemid)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// OEM info: 0x%04x", VAL16(pDOSHeader->e_oeminfo)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// File addr. of COFF header: 0x%04x", VAL16(pDOSHeader->e_lfanew)); printLine(GUICookie,szStr); strcpy_s(szString,SZSTRING_SIZE,"// ----- COFF/PE Headers:"); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Signature: 0x%08x", VAL32(g_pPELoader->Signature())); printLine(GUICookie,szStr); strcpy_s(szString,SZSTRING_SIZE,"// ----- COFF Header:"); printLine(GUICookie,szStr); PIMAGE_FILE_HEADER pCOFF = g_pPELoader->coffHeader(); sprintf_s(szString,SZSTRING_SIZE,"// Machine: 0x%04x", VAL16(pCOFF->Machine)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Number of sections: 0x%04x", VAL16(pCOFF->NumberOfSections)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Time-date stamp: 0x%08x", VAL32(pCOFF->TimeDateStamp)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Ptr to symbol table: 0x%08x", VAL32(pCOFF->PointerToSymbolTable)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Number of symbols: 0x%08x", VAL32(pCOFF->NumberOfSymbols)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of optional header: 0x%04x", VAL16(pCOFF->SizeOfOptionalHeader)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Characteristics: 0x%04x", VAL16(pCOFF->Characteristics)); printLine(GUICookie,szStr); if (g_pPELoader->IsPE32()) { IMAGE_NT_HEADERS32 *pNTHeader = g_pPELoader->ntHeaders32(); IMAGE_OPTIONAL_HEADER32 *pOptHeader = &pNTHeader->OptionalHeader; strcpy_s(szString,SZSTRING_SIZE,"// ----- PE Optional Header (32 bit):"); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Magic: 0x%04x", VAL16(pOptHeader->Magic)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Major linker version: 0x%02x", VAL16(pOptHeader->MajorLinkerVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Minor linker version: 0x%02x", VAL16(pOptHeader->MinorLinkerVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of code: 0x%08x", VAL32(pOptHeader->SizeOfCode)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of init.data: 0x%08x", VAL32(pOptHeader->SizeOfInitializedData)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of uninit.data: 0x%08x", VAL32(pOptHeader->SizeOfUninitializedData)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Addr. 
of entry point: 0x%08x", VAL32(pOptHeader->AddressOfEntryPoint)); printLine(GUICookie,szStr); dwAddrOfEntryPoint = VAL32(pOptHeader->AddressOfEntryPoint); dwEntryPointSize = 6; sprintf_s(szString,SZSTRING_SIZE,"// Base of code: 0x%08x", VAL32(pOptHeader->BaseOfCode)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Base of data: 0x%08x", VAL32(pOptHeader->BaseOfData)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Image base: 0x%08x", VAL32(pOptHeader->ImageBase)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Section alignment: 0x%08x", VAL32(pOptHeader->SectionAlignment)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// File alignment: 0x%08x", VAL32(pOptHeader->FileAlignment)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Major OS version: 0x%04x", VAL16(pOptHeader->MajorOperatingSystemVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Minor OS version: 0x%04x", VAL16(pOptHeader->MinorOperatingSystemVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Major image version: 0x%04x", VAL16(pOptHeader->MajorImageVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Minor image version: 0x%04x", VAL16(pOptHeader->MinorImageVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Major subsystem version: 0x%04x", VAL16(pOptHeader->MajorSubsystemVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Minor subsystem version: 0x%04x", VAL16(pOptHeader->MinorSubsystemVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of image: 0x%08x", VAL32(pOptHeader->SizeOfImage)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of headers: 0x%08x", VAL32(pOptHeader->SizeOfHeaders)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Checksum: 0x%08x", VAL32(pOptHeader->CheckSum)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Subsystem: 0x%04x", VAL16(pOptHeader->Subsystem)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// DLL characteristics: 0x%04x", VAL16(pOptHeader->DllCharacteristics)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of stack reserve: 0x%08x", VAL32(pOptHeader->SizeOfStackReserve)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of stack commit: 0x%08x", VAL32(pOptHeader->SizeOfStackCommit)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of heap reserve: 0x%08x", VAL32(pOptHeader->SizeOfHeapReserve)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of heap commit: 0x%08x", VAL32(pOptHeader->SizeOfHeapCommit)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Loader flags: 0x%08x", VAL32(pOptHeader->LoaderFlags)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Directories: 0x%08x", VAL32(pOptHeader->NumberOfRvaAndSizes)); printLine(GUICookie,szStr); DUMP_DIRECTORY("Export Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_EXPORT]); DUMP_DIRECTORY("Import Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT]); DUMP_DIRECTORY("Resource Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_RESOURCE]); DUMP_DIRECTORY("Exception Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_EXCEPTION]); DUMP_DIRECTORY("Security Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_SECURITY]); DUMP_DIRECTORY("Base Relocation Table: 
", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_BASERELOC]); DUMP_DIRECTORY("Debug Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_DEBUG]); DUMP_DIRECTORY("Architecture Specific: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_ARCHITECTURE]); DUMP_DIRECTORY("Global Pointer: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_GLOBALPTR]); DUMP_DIRECTORY("TLS Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_TLS]); DUMP_DIRECTORY("Load Config Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG]); DUMP_DIRECTORY("Bound Import Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT]); DUMP_DIRECTORY("Import Address Table: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_IAT]); DUMP_DIRECTORY("Delay Load IAT: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT]); DUMP_DIRECTORY("CLR Header: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR]); printLine(GUICookie,""); DumpSectionHeaders((IMAGE_SECTION_HEADER*)(pOptHeader+1),pNTHeader->FileHeader.NumberOfSections,GUICookie); DumpBaseReloc("Base Relocation Table",&pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_BASERELOC],GUICookie); DumpIAT("Import Address Table", &pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT],GUICookie); DumpIAT("Delay Load Import Address Table", &pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT],GUICookie); } else { IMAGE_NT_HEADERS64 *pNTHeader = g_pPELoader->ntHeaders64(); IMAGE_OPTIONAL_HEADER64 *pOptHeader = &pNTHeader->OptionalHeader; strcpy_s(szString,SZSTRING_SIZE,"// ----- PE Optional Header (64 bit):"); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Magic: 0x%04x", VAL16(pOptHeader->Magic)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Major linker version: 0x%02x", VAL16(pOptHeader->MajorLinkerVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Minor linker version: 0x%02x", VAL16(pOptHeader->MinorLinkerVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of code: 0x%08x", VAL32(pOptHeader->SizeOfCode)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of init.data: 0x%08x", VAL32(pOptHeader->SizeOfInitializedData)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of uninit.data: 0x%08x", VAL32(pOptHeader->SizeOfUninitializedData)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Addr. of entry point: 0x%08x", VAL32(pOptHeader->AddressOfEntryPoint)); printLine(GUICookie,szStr); dwAddrOfEntryPoint = VAL32(pOptHeader->AddressOfEntryPoint); dwEntryPointSize = (VAL16(pCOFF->Machine)==IMAGE_FILE_MACHINE_IA64) ? 
48 : 12; sprintf_s(szString,SZSTRING_SIZE,"// Base of code: 0x%08x", VAL32(pOptHeader->BaseOfCode)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Image base: 0x%016I64x", VAL64(pOptHeader->ImageBase)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Section alignment: 0x%08x", VAL32(pOptHeader->SectionAlignment)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// File alignment: 0x%08x", VAL32(pOptHeader->FileAlignment)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Major OS version: 0x%04x", VAL16(pOptHeader->MajorOperatingSystemVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Minor OS version: 0x%04x", VAL16(pOptHeader->MinorOperatingSystemVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Major image version: 0x%04x", VAL16(pOptHeader->MajorImageVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Minor image version: 0x%04x", VAL16(pOptHeader->MinorImageVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Major subsystem version: 0x%04x", VAL16(pOptHeader->MajorSubsystemVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Minor subsystem version: 0x%04x", VAL16(pOptHeader->MinorSubsystemVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of image: 0x%08x", VAL32(pOptHeader->SizeOfImage)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of headers: 0x%08x", VAL32(pOptHeader->SizeOfHeaders)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Checksum: 0x%08x", VAL32(pOptHeader->CheckSum)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Subsystem: 0x%04x", VAL16(pOptHeader->Subsystem)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// DLL characteristics: 0x%04x", VAL16(pOptHeader->DllCharacteristics)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of stack reserve: 0x%016I64x", VAL64(pOptHeader->SizeOfStackReserve)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of stack commit: 0x%016I64x", VAL64(pOptHeader->SizeOfStackCommit)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of heap reserve: 0x%016I64x", VAL64(pOptHeader->SizeOfHeapReserve)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Size of heap commit: 0x%016I64x", VAL64(pOptHeader->SizeOfHeapCommit)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Loader flags: 0x%08x", VAL32(pOptHeader->LoaderFlags)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Directories: 0x%08x", VAL32(pOptHeader->NumberOfRvaAndSizes)); printLine(GUICookie,szStr); DUMP_DIRECTORY("Export Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_EXPORT]); DUMP_DIRECTORY("Import Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT]); DUMP_DIRECTORY("Resource Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_RESOURCE]); DUMP_DIRECTORY("Exception Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_EXCEPTION]); DUMP_DIRECTORY("Security Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_SECURITY]); DUMP_DIRECTORY("Base Relocation Table: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_BASERELOC]); DUMP_DIRECTORY("Debug Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_DEBUG]); DUMP_DIRECTORY("Architecture Specific: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_ARCHITECTURE]); 
DUMP_DIRECTORY("Global Pointer: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_GLOBALPTR]); DUMP_DIRECTORY("TLS Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_TLS]); DUMP_DIRECTORY("Load Config Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG]); DUMP_DIRECTORY("Bound Import Directory: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT]); DUMP_DIRECTORY("Import Address Table: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_IAT]); DUMP_DIRECTORY("Delay Load IAT: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT]); DUMP_DIRECTORY("CLR Header: ", pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR]); printLine(GUICookie,""); DumpSectionHeaders((IMAGE_SECTION_HEADER*)(pOptHeader+1),pNTHeader->FileHeader.NumberOfSections,GUICookie); DumpBaseReloc("Base Relocation Table",&pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_BASERELOC],GUICookie); DumpIAT("Import Address Table", &pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT],GUICookie); DumpIAT("Delay Load Import Address Table", &pOptHeader->DataDirectory[IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT],GUICookie); } if(dwAddrOfEntryPoint != 0) DumpEntryPoint(dwAddrOfEntryPoint,dwEntryPointSize,GUICookie); printLine(GUICookie,""); printLine(GUICookie,""); if (!CORHeader) { printLine(GUICookie,RstrUTF(IDS_E_COMIMAGE)); return; } strcpy_s(szString,SZSTRING_SIZE,"// ----- CLR Header:"); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Header size: 0x%08x", VAL32(CORHeader->cb)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Major runtime version: 0x%04x", VAL16(CORHeader->MajorRuntimeVersion)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Minor runtime version: 0x%04x", VAL16(CORHeader->MinorRuntimeVersion)); printLine(GUICookie,szStr); // Metadata DUMP_DIRECTORY("Metadata Directory: ", CORHeader->MetaData); sprintf_s(szString,SZSTRING_SIZE,"// Flags: 0x%08x", VAL32(CORHeader->Flags)); printLine(GUICookie,szStr); sprintf_s(szString,SZSTRING_SIZE,"// Entry point token: 0x%08x", VAL32(IMAGE_COR20_HEADER_FIELD(*CORHeader, EntryPointToken))); printLine(GUICookie,szStr); // Binding DUMP_DIRECTORY("Resources Directory: ", CORHeader->Resources); DUMP_DIRECTORY("Strong Name Signature: ", CORHeader->StrongNameSignature); DUMP_DIRECTORY("CodeManager Table: ", CORHeader->CodeManagerTable); // Fixups DUMP_DIRECTORY("VTableFixups Directory: ", CORHeader->VTableFixups); DUMP_DIRECTORY("Export Address Table: ", CORHeader->ExportAddressTableJumps); // Managed Native Code DUMP_DIRECTORY("Precompile Header: ", CORHeader->ManagedNativeHeader); DumpMetadataHeader("Metadata Header",&(CORHeader->MetaData),GUICookie); } #ifdef _PREFAST_ #pragma warning(pop) #endif void DumpHeaderDetails(IMAGE_COR20_HEADER *CORHeader, void* GUICookie) { printLine(GUICookie,""); DumpCodeManager(CORHeader,GUICookie); printLine(GUICookie,""); DumpVTables(CORHeader,GUICookie); printLine(GUICookie,""); DumpEATTable(CORHeader,GUICookie); printLine(GUICookie,""); } void WritePerfData(const char *KeyDesc, const char *KeyName, const char *UnitDesc, const char *UnitName, void* Value, BOOL IsInt) { DWORD BytesWritten; if(!g_fDumpToPerfWriter) return; if (!g_PerfDataFilePtr) { if((g_PerfDataFilePtr = WszCreateFile(W("c:\\temp\\perfdata.dat"), GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ, NULL, OPEN_ALWAYS, 0, NULL) ) == INVALID_HANDLE_VALUE) { printLine(NULL,"PefTimer::LogStoppedTime(): Unable to open the FullPath file. 
No performance data will be generated"); g_fDumpToPerfWriter = FALSE; return; } WriteFile(g_PerfDataFilePtr,"ExecTime=0\r\n",13,&BytesWritten,NULL); WriteFile(g_PerfDataFilePtr,"ExecUnit=bytes\r\n",17,&BytesWritten,NULL); WriteFile(g_PerfDataFilePtr,"ExecUnitDescr=File Size\r\n",26,&BytesWritten,NULL); WriteFile(g_PerfDataFilePtr,"ExeciDirection=False\r\n",23,&BytesWritten,NULL); } char ValueStr[10]; char TmpStr[201]; if (IsInt) { sprintf_s(ValueStr,10,"%d",(int)*(int*)Value); } else { sprintf_s(ValueStr,10,"%5.2f",(float)*(float*)Value); } sprintf_s(TmpStr, 201, "%s=%s\r\n", KeyName, ValueStr); WriteFile(g_PerfDataFilePtr, TmpStr, (DWORD)strlen(TmpStr), &BytesWritten, NULL); sprintf_s(TmpStr, 201, "%s Descr=%s\r\n", KeyName, KeyDesc); WriteFile(g_PerfDataFilePtr, TmpStr, (DWORD)strlen(TmpStr), &BytesWritten, NULL); sprintf_s(TmpStr, 201, "%s Unit=%s\r\n", KeyName, UnitName); WriteFile(g_PerfDataFilePtr, TmpStr, (DWORD)strlen(TmpStr), &BytesWritten, NULL); sprintf_s(TmpStr, 201, "%s Unit Descr=%s\r\n", KeyName, UnitDesc); WriteFile(g_PerfDataFilePtr, TmpStr, (DWORD)strlen(TmpStr), &BytesWritten, NULL); sprintf_s(TmpStr, 201, "%s IDirection=%s\r\n", KeyName, "False"); WriteFile(g_PerfDataFilePtr, TmpStr, (DWORD)strlen(TmpStr), &BytesWritten, NULL); } void WritePerfDataInt(const char *KeyDesc, const char *KeyName, const char *UnitDesc, const char *UnitName, int Value) { WritePerfData(KeyDesc,KeyName,UnitDesc,UnitName, (void*)&Value, TRUE); } void WritePerfDataFloat(const char *KeyDesc, const char *KeyName, const char *UnitDesc, const char *UnitName, float Value) { WritePerfData(KeyDesc,KeyName,UnitDesc,UnitName, (void*)&Value, FALSE); } IMetaDataTables *pITables = NULL; //ULONG sizeRec, count; //int size, size2; int metaSize = 0; __int64 fTableSeen; inline void TableSeen(unsigned long n) { fTableSeen |= (I64(1) << n); } inline int IsTableSeen(unsigned long n) { return (fTableSeen & (I64(1) << n)) ? 1 : 0;} inline void TableSeenReset() { fTableSeen = 0;} void DumpTable(unsigned long Table, const char *TableName, void* GUICookie) { char *szStr = &szString[0]; const char **ppTableName = 0; int size; ULONG sizeRec, count; // Record that this table has been seen. TableSeen(Table); // If no name passed in, get from table info. 
if (!TableName) ppTableName = &TableName; pITables->GetTableInfo(Table, &sizeRec, &count, NULL, NULL, ppTableName); if(count > 0) { metaSize += size = count * sizeRec; WritePerfDataInt(TableName,TableName,"count","count",count); WritePerfDataInt(TableName,TableName,"bytes","bytes",size); sprintf_s(szString,SZSTRING_SIZE,"// %-14s- %4d (%d bytes)", TableName, count, size); printLine(GUICookie,szStr); } } #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable:21000) // Suppress PREFast warning about overly large function #endif void DumpStatistics(IMAGE_COR20_HEADER *CORHeader, void* GUICookie) { int fileSize, miscPESize, miscCOMPlusSize, methodHeaderSize, methodBodySize; int methodBodies, fatHeaders, tinyHeaders, deprecatedHeaders; int size, size2; int fatSections, smallSections; ULONG methodDefs; ULONG i; ULONG sizeRec, count; char buf[MAX_MEMBER_LENGTH]; char* szStr = &szString[0]; TableSeenReset(); metaSize = 0; sprintf_s(szString,SZSTRING_SIZE,"// File size : %d", fileSize = SafeGetFileSize(g_pPELoader->getHFile(), NULL)); printLine(GUICookie,szStr); WritePerfDataInt("FileSize","FileSize","standard byte","bytes",fileSize); if (g_pPELoader->IsPE32()) { size = VAL32(((IMAGE_DOS_HEADER*) g_pPELoader->getHModule())->e_lfanew) + sizeof(IMAGE_NT_HEADERS32) - sizeof(IMAGE_OPTIONAL_HEADER32) + VAL16(g_pPELoader->ntHeaders32()->FileHeader.SizeOfOptionalHeader) + VAL16(g_pPELoader->ntHeaders32()->FileHeader.NumberOfSections) * sizeof(IMAGE_SECTION_HEADER); size2 = (size + VAL32(g_pPELoader->ntHeaders32()->OptionalHeader.FileAlignment) - 1) & ~(VAL32(g_pPELoader->ntHeaders32()->OptionalHeader.FileAlignment) - 1); } else { size = VAL32(((IMAGE_DOS_HEADER*) g_pPELoader->getHModule())->e_lfanew) + sizeof(IMAGE_NT_HEADERS64) - sizeof(IMAGE_OPTIONAL_HEADER64) + VAL16(g_pPELoader->ntHeaders64()->FileHeader.SizeOfOptionalHeader) + VAL16(g_pPELoader->ntHeaders64()->FileHeader.NumberOfSections) * sizeof(IMAGE_SECTION_HEADER); size2 = (size + VAL32(g_pPELoader->ntHeaders64()->OptionalHeader.FileAlignment) - 1) & ~(VAL32(g_pPELoader->ntHeaders64()->OptionalHeader.FileAlignment) - 1); } DWORD sizeOfHeaders; if (g_pPELoader->IsPE32()) { sizeOfHeaders = VAL32(g_pPELoader->ntHeaders32()->OptionalHeader.SizeOfHeaders); WritePerfDataInt("PE header size", "PE header size", "standard byte", "bytes", sizeOfHeaders); WritePerfDataInt("PE header size used", "PE header size used", "standard byte", "bytes", size); WritePerfDataFloat("PE header size", "PE header size", "percentage", "percentage", (float)((sizeOfHeaders * 100) / fileSize)); sprintf_s(szString,SZSTRING_SIZE,"// PE header size : %d (%d used) (%5.2f%%)", sizeOfHeaders, size, (double) (sizeOfHeaders * 100) / fileSize); printLine(GUICookie,szStr); miscPESize = 0; for (i=0; i < VAL32(g_pPELoader->ntHeaders32()->OptionalHeader.NumberOfRvaAndSizes); ++i) { // Skip the CLR header. 
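// (The CLR header occupies its own data directory slot; the PE32+ branch below performs
//  the same skip using the named IMAGE_DIRECTORY_ENTRY_COMHEADER constant.)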
if (i != 15) miscPESize += (int) VAL32(g_pPELoader->ntHeaders32()->OptionalHeader.DataDirectory[i].Size); } } else { sizeOfHeaders = VAL32(g_pPELoader->ntHeaders64()->OptionalHeader.SizeOfHeaders); WritePerfDataInt("PE+ header size", "PE header size", "standard byte", "bytes", sizeOfHeaders); WritePerfDataInt("PE+ header size used", "PE header size used", "standard byte", "bytes", size); WritePerfDataFloat("PE+ header size", "PE header size", "percentage", "percentage", (float)((sizeOfHeaders * 100) / fileSize)); sprintf_s(szString,SZSTRING_SIZE,"// PE header size : %d (%d used) (%5.2f%%)", sizeOfHeaders, size, (double) (sizeOfHeaders * 100) / fileSize); printLine(GUICookie,szStr); miscPESize = 0; for (i=0; i < VAL32(g_pPELoader->ntHeaders64()->OptionalHeader.NumberOfRvaAndSizes); ++i) { // Skip the CLR header. if (i != IMAGE_DIRECTORY_ENTRY_COMHEADER) miscPESize += (int) VAL32(g_pPELoader->ntHeaders64()->OptionalHeader.DataDirectory[i].Size); } } WritePerfDataInt("PE additional info", "PE additional info", "standard byte", "bytes",miscPESize); WritePerfDataFloat("PE additional info", "PE additional info", "percentage", "percent", (float) ((miscPESize * 100) / fileSize)); sprintf_s(buf, MAX_MEMBER_LENGTH, "PE additional info : %d", miscPESize); sprintf_s(szString,SZSTRING_SIZE,"// %-40s (%5.2f%%)", buf, (double) (miscPESize * 100) / fileSize); printLine(GUICookie,szStr); WORD numberOfSections; if (g_pPELoader->IsPE32()) { numberOfSections = VAL16(g_pPELoader->ntHeaders32()->FileHeader.NumberOfSections); } else { numberOfSections = VAL16(g_pPELoader->ntHeaders64()->FileHeader.NumberOfSections); } WritePerfDataInt("Num.of PE sections", "Num.of PE sections", "Nbr of sections", "sections",numberOfSections); sprintf_s(szString,SZSTRING_SIZE,"// Num.of PE sections : %d", numberOfSections); printLine(GUICookie,szStr); WritePerfDataInt("CLR header size", "CLR header size", "byte", "bytes",VAL32(CORHeader->cb)); WritePerfDataFloat("CLR header size", "CLR header size", "percentage", "percent",(float) ((VAL32(CORHeader->cb) * 100) / fileSize)); sprintf_s(buf, MAX_MEMBER_LENGTH, "CLR header size : %d", VAL32(CORHeader->cb)); sprintf_s(szString,SZSTRING_SIZE,"// %-40s (%5.2f%%)", buf, (double) (VAL32(CORHeader->cb) * 100) / fileSize); printLine(GUICookie,szStr); DWORD dwMetaSize = g_cbMetaData; WritePerfDataInt("CLR meta-data size", "CLR meta-data size", "bytes", "bytes",dwMetaSize); WritePerfDataFloat("CLR meta-data size", "CLR meta-data size", "percentage", "percent",(float) ((dwMetaSize * 100) / fileSize)); sprintf_s(buf, MAX_MEMBER_LENGTH, "CLR meta-data size : %d", dwMetaSize); sprintf_s(szString,SZSTRING_SIZE,"// %-40s (%5.2f%%)", buf, (double) (dwMetaSize * 100) / fileSize); printLine(GUICookie,szStr); IMAGE_DATA_DIRECTORY *pFirst = &CORHeader->Resources; ULONG32 iCount = (ULONG32)((BYTE *) &CORHeader->ManagedNativeHeader - (BYTE *) &CORHeader->Resources) / sizeof(IMAGE_DATA_DIRECTORY) + 1; miscCOMPlusSize = 0; for (ULONG32 iDir=0; iDir<iCount; iDir++) { miscCOMPlusSize += VAL32(pFirst->Size); pFirst++; } WritePerfDataInt("CLR Additional info", "CLR Additional info", "bytes", "bytes",miscCOMPlusSize); WritePerfDataFloat("CLR Additional info", "CLR Additional info", "percentage", "percent",(float) ((miscCOMPlusSize * 100) / fileSize)); sprintf_s(buf, MAX_MEMBER_LENGTH, "CLR additional info : %d", miscCOMPlusSize); sprintf_s(szString,SZSTRING_SIZE,"// %-40s (%5.2f%%)", buf, (double) (miscCOMPlusSize * 100) / fileSize); printLine(GUICookie,szStr); // Go through each method def collecting some 
statistics. methodHeaderSize = methodBodySize = 0; methodBodies = fatHeaders = tinyHeaders = deprecatedHeaders = fatSections = smallSections = 0; methodDefs = g_pImport->GetCountWithTokenKind(mdtMethodDef); for (i=1; i <= methodDefs; ++i) { ULONG rva; DWORD flags; if (FAILED(g_pImport->GetMethodImplProps(TokenFromRid(i, mdtMethodDef), &rva, &flags))) { continue; } if ((rva != 0)&&(IsMiIL(flags) || IsMiOPTIL(flags))) // We don't handle native yet. { ++methodBodies; COR_ILMETHOD_FAT *pMethod = NULL; g_pPELoader->getVAforRVA(rva, (void **) &pMethod); if (pMethod->IsFat()) { ++fatHeaders; methodHeaderSize += pMethod->GetSize() * 4; methodBodySize += pMethod->GetCodeSize(); // Add in the additional sections. BYTE *sectsBegin = (BYTE *) (pMethod->GetCode() + pMethod->GetCodeSize()); const COR_ILMETHOD_SECT *pSect = pMethod->GetSect(); const COR_ILMETHOD_SECT *pOldSect; if (pSect != NULL) { // Keep skipping a pointer past each section. do { pOldSect = pSect; if (((COR_ILMETHOD_SECT_FAT *) pSect)->GetKind() & CorILMethod_Sect_FatFormat) { ++fatSections; pSect = (COR_ILMETHOD_SECT *)((BYTE *) pSect + ((COR_ILMETHOD_SECT_FAT *) pSect)->GetDataSize()); } else { ++smallSections; pSect = (COR_ILMETHOD_SECT *)((BYTE *) pSect + ((COR_ILMETHOD_SECT_SMALL *) pSect)->DataSize); } pSect = (COR_ILMETHOD_SECT *) (((UINT_PTR) pSect + 3) & ~3); } while (pOldSect->More()); // Add on the section sizes. methodHeaderSize += (int) ((BYTE *) pSect - sectsBegin); } } else if (((COR_ILMETHOD_TINY *) pMethod)->IsTiny()) { ++tinyHeaders; methodHeaderSize += sizeof(COR_ILMETHOD_TINY); methodBodySize += ((COR_ILMETHOD_TINY *) pMethod)->GetCodeSize(); } else { _ASSERTE(!"Unrecognized header type"); } } } WritePerfDataInt("CLR method headers", "CLR method headers", "bytes", "bytes",methodHeaderSize); WritePerfDataFloat("CLR method headers", "CLR method headers", "percentage", "percent",(float) ((methodHeaderSize * 100) / fileSize)); sprintf_s(buf, MAX_MEMBER_LENGTH, "CLR method headers : %d", methodHeaderSize); sprintf_s(szString,SZSTRING_SIZE,"// %-40s (%5.2f%%)", buf, (double) (methodHeaderSize * 100) / fileSize); printLine(GUICookie,szStr); WritePerfDataInt("Managed code", "Managed code", "bytes", "bytes",methodBodySize); WritePerfDataFloat("Managed code", "Managed code", "percentage", "percent",(float) ((methodBodySize * 100) / fileSize)); sprintf_s(buf, MAX_MEMBER_LENGTH, "Managed code : %d", methodBodySize); sprintf_s(szString,SZSTRING_SIZE,"// %-40s (%5.2f%%)", buf, (double) (methodBodySize * 100) / fileSize); printLine(GUICookie,szStr); if (g_pPELoader->IsPE32()) { DWORD sizeOfInitializedData = VAL32(g_pPELoader->ntHeaders32()->OptionalHeader.SizeOfInitializedData); WritePerfDataInt("Data", "Data", "bytes", "bytes",sizeOfInitializedData); WritePerfDataFloat("Data", "Data", "percentage", "percent",(float) ((sizeOfInitializedData * 100) / fileSize)); sprintf_s(buf, MAX_MEMBER_LENGTH, "Data : %d", sizeOfInitializedData); sprintf_s(szString,SZSTRING_SIZE,"// %-40s (%5.2f%%)", buf, (double) (sizeOfInitializedData * 100) / fileSize); printLine(GUICookie,szStr); size = fileSize - g_pPELoader->ntHeaders32()->OptionalHeader.SizeOfHeaders - miscPESize - CORHeader->cb - g_cbMetaData - miscCOMPlusSize - sizeOfInitializedData - methodHeaderSize - methodBodySize; } else { DWORD sizeOfInitializedData = VAL32(g_pPELoader->ntHeaders64()->OptionalHeader.SizeOfInitializedData); WritePerfDataInt("Data", "Data", "bytes", "bytes",sizeOfInitializedData); WritePerfDataFloat("Data", "Data", "percentage", "percent",(float) 
((sizeOfInitializedData * 100) / fileSize)); sprintf_s(buf, MAX_MEMBER_LENGTH, "Data : %d", sizeOfInitializedData); sprintf_s(szString,SZSTRING_SIZE,"// %-40s (%5.2f%%)", buf, (double) (sizeOfInitializedData * 100) / fileSize); printLine(GUICookie,szStr); size = fileSize - g_pPELoader->ntHeaders64()->OptionalHeader.SizeOfHeaders - miscPESize - CORHeader->cb - g_cbMetaData - miscCOMPlusSize - sizeOfInitializedData - methodHeaderSize - methodBodySize; } WritePerfDataInt("Unaccounted", "Unaccounted", "bytes", "bytes",size); WritePerfDataFloat("Unaccounted", "Unaccounted", "percentage", "percent",(float) ((size * 100) / fileSize)); sprintf_s(buf, MAX_MEMBER_LENGTH, "Unaccounted : %d", size); sprintf_s(szString,SZSTRING_SIZE,"// %-40s (%5.2f%%)", buf, (double) (size * 100) / fileSize); printLine(GUICookie,szStr); // Detail... if (g_pPELoader->IsPE32()) { numberOfSections = VAL16(g_pPELoader->ntHeaders32()->FileHeader.NumberOfSections); WritePerfDataInt("Num.of PE sections", "Num.of PE sections", "bytes", "bytes",numberOfSections); printLine(GUICookie,""); sprintf_s(szString,SZSTRING_SIZE,"// Num.of PE sections : %d", numberOfSections); printLine(GUICookie,szStr); IMAGE_SECTION_HEADER *pSecHdr = IMAGE_FIRST_SECTION(g_pPELoader->ntHeaders32()); for (i=0; i < numberOfSections; ++i) { WritePerfDataInt((char*)pSecHdr->Name,(char*)pSecHdr->Name, "bytes", "bytes",VAL32(pSecHdr->SizeOfRawData)); sprintf_s(szString,SZSTRING_SIZE,"// %-8s - %d", pSecHdr->Name, VAL32(pSecHdr->SizeOfRawData)); printLine(GUICookie,szStr); ++pSecHdr; } } else { numberOfSections = VAL16(g_pPELoader->ntHeaders64()->FileHeader.NumberOfSections); WritePerfDataInt("Num.of PE sections", "Num.of PE sections", "bytes", "bytes",numberOfSections); printLine(GUICookie,""); sprintf_s(szString,SZSTRING_SIZE,"// Num.of PE sections : %d", numberOfSections); printLine(GUICookie,szStr); IMAGE_SECTION_HEADER *pSecHdr = IMAGE_FIRST_SECTION(g_pPELoader->ntHeaders64()); for (i=0; i < numberOfSections; ++i) { WritePerfDataInt((char*)pSecHdr->Name,(char*)pSecHdr->Name, "bytes", "bytes",pSecHdr->SizeOfRawData); sprintf_s(szString,SZSTRING_SIZE,"// %-8s - %d", pSecHdr->Name, pSecHdr->SizeOfRawData); printLine(GUICookie,szStr); ++pSecHdr; } } if (FAILED(g_pPubImport->QueryInterface(IID_IMetaDataTables, (void**)&pITables))) { sprintf_s(szString,SZSTRING_SIZE,"// Unable to get IMetaDataTables interface"); printLine(GUICookie,szStr); return; } if (pITables == 0) { printLine(GUICookie,RstrUTF(IDS_E_MDDETAILS)); return; } else { DWORD Size = g_cbMetaData; WritePerfDataInt("CLR meta-data size", "CLR meta-data size", "bytes", "bytes",Size); printLine(GUICookie,""); sprintf_s(szString,SZSTRING_SIZE,"// CLR meta-data size : %d", Size); printLine(GUICookie,szStr); metaSize = 0; pITables->GetTableInfo(TBL_Module, &sizeRec, &count, NULL, NULL, NULL); TableSeen(TBL_Module); metaSize += size = count * sizeRec; \ WritePerfDataInt("Module (count)", "Module (count)", "count", "count",count); WritePerfDataInt("Module (bytes)", "Module (bytes)", "bytes", "bytes",size); sprintf_s(szString,SZSTRING_SIZE,"// %-14s- %4d (%d bytes)", "Module", count, size); \ printLine(GUICookie,szStr); if ((count = g_pImport->GetCountWithTokenKind(mdtTypeDef)) > 0) { int flags, interfaces = 0, explicitLayout = 0; for (i=1; i <= count; ++i) { if (FAILED(g_pImport->GetTypeDefProps(TokenFromRid(i, mdtTypeDef), (ULONG *) &flags, NULL))) { continue; } if (flags & tdInterface) ++interfaces; if (flags & tdExplicitLayout) ++explicitLayout; } // Get count from table -- count reported by GetCount... 
doesn't include the "global" typedef. pITables->GetTableInfo(TBL_TypeDef, &sizeRec, &count, NULL, NULL, NULL); TableSeen(TBL_TypeDef); metaSize += size = count * sizeRec; WritePerfDataInt("TypeDef (count)", "TypeDef (count)", "count", "count", count); WritePerfDataInt("TypeDef (bytes)", "TypeDef (bytes)", "bytes", "bytes", size); WritePerfDataInt("interfaces", "interfaces", "count", "count", interfaces); WritePerfDataInt("explicitLayout", "explicitLayout", "count", "count", explicitLayout); sprintf_s(buf, MAX_MEMBER_LENGTH, " TypeDef - %4d (%d bytes)", count, size); sprintf_s(szString,SZSTRING_SIZE,"// %-38s %d interfaces, %d explicit layout", buf, interfaces, explicitLayout); printLine(GUICookie,szStr); } } pITables->GetTableInfo(TBL_TypeRef, &sizeRec, &count, NULL, NULL, NULL); TableSeen(TBL_TypeRef); if (count > 0) { metaSize += size = count * sizeRec; \ WritePerfDataInt("TypeRef (count)", "TypeRef (count)", "count", "count", count); WritePerfDataInt("TypeRef (bytes)", "TypeRef (bytes)", "bytes", "bytes", size); sprintf_s(szString,SZSTRING_SIZE,"// %-14s- %4d (%d bytes)", "TypeRef", count, size); \ printLine(GUICookie,szStr); } if ((count = g_pImport->GetCountWithTokenKind(mdtMethodDef)) > 0) { int flags, abstract = 0, native = 0; for (i=1; i <= count; ++i) { if (FAILED(g_pImport->GetMethodDefProps(TokenFromRid(i, mdtMethodDef), (DWORD *)&flags))) { sprintf_s(szString, SZSTRING_SIZE, "// Invalid MethodDef %08X record", TokenFromRid(i, mdtMethodDef)); printLine(GUICookie, szStr); return; } if (flags & mdAbstract) ++abstract; } pITables->GetTableInfo(TBL_Method, &sizeRec, NULL, NULL, NULL, NULL); TableSeen(TBL_Method); if (count > 0) { metaSize += size = count * sizeRec; WritePerfDataInt("MethodDef (count)", "MethodDef (count)", "count", "count", count); WritePerfDataInt("MethodDef (bytes)", "MethodDef (bytes)", "bytes", "bytes", size); WritePerfDataInt("abstract", "abstract", "count", "count", abstract); WritePerfDataInt("native", "native", "count", "count", native); WritePerfDataInt("methodBodies", "methodBodies", "count", "count", methodBodies); sprintf_s(buf, MAX_MEMBER_LENGTH, " MethodDef - %4d (%d bytes)", count, size); sprintf_s(szString,SZSTRING_SIZE,"// %-38s %d abstract, %d native, %d bodies", buf, abstract, native, methodBodies); printLine(GUICookie,szStr); } } if ((count = g_pImport->GetCountWithTokenKind(mdtFieldDef)) > 0) { int flags, constants = 0; for (i=1; i <= count; ++i) { if (FAILED(g_pImport->GetFieldDefProps(TokenFromRid(i, mdtFieldDef), (DWORD *)&flags))) { sprintf_s(szString, SZSTRING_SIZE, "// Invalid FieldDef %08X record", TokenFromRid(i, mdtFieldDef)); printLine(GUICookie, szStr); return; } if ((flags & (fdStatic|fdInitOnly)) == (fdStatic|fdInitOnly)) ++constants; } pITables->GetTableInfo(TBL_Field, &sizeRec, NULL, NULL, NULL, NULL); metaSize += size = count * sizeRec; WritePerfDataInt("FieldDef (count)", "FieldDef (count)", "count", "count", count); WritePerfDataInt("FieldDef (bytes)", "FieldDef (bytes)", "bytes", "bytes", size); WritePerfDataInt("constant", "constant", "count", "count", constants); sprintf_s(buf, MAX_MEMBER_LENGTH, " FieldDef - %4d (%d bytes)", count, size); sprintf_s(szString,SZSTRING_SIZE,"// %-38s %d constant", buf, constants); printLine(GUICookie,szStr); TableSeen(TBL_Field); } DumpTable(TBL_MemberRef, "MemberRef", GUICookie); DumpTable(TBL_Param, "ParamDef", GUICookie); DumpTable(TBL_MethodImpl, "MethodImpl", GUICookie); DumpTable(TBL_Constant, "Constant", GUICookie); DumpTable(TBL_CustomAttribute, "CustomAttribute", GUICookie); 
DumpTable(TBL_FieldMarshal, "NativeType", GUICookie); DumpTable(TBL_ClassLayout, "ClassLayout", GUICookie); DumpTable(TBL_FieldLayout, "FieldLayout", GUICookie); DumpTable(TBL_StandAloneSig, "StandAloneSig", GUICookie); DumpTable(TBL_InterfaceImpl, "InterfaceImpl", GUICookie); DumpTable(TBL_PropertyMap, "PropertyMap", GUICookie); DumpTable(TBL_Property, "Property", GUICookie); DumpTable(TBL_MethodSemantics, "MethodSemantic", GUICookie); DumpTable(TBL_DeclSecurity, "Security", GUICookie); DumpTable(TBL_TypeSpec, "TypeSpec", GUICookie); DumpTable(TBL_ModuleRef, "ModuleRef", GUICookie); DumpTable(TBL_Assembly, "Assembly", GUICookie); DumpTable(TBL_AssemblyProcessor, "AssemblyProcessor", GUICookie); DumpTable(TBL_AssemblyOS, "AssemblyOS", GUICookie); DumpTable(TBL_AssemblyRef, "AssemblyRef", GUICookie); DumpTable(TBL_AssemblyRefProcessor, "AssemblyRefProcessor", GUICookie); DumpTable(TBL_AssemblyRefOS, "AssemblyRefOS", GUICookie); DumpTable(TBL_File, "File", GUICookie); DumpTable(TBL_ExportedType, "ExportedType", GUICookie); DumpTable(TBL_ManifestResource, "ManifestResource", GUICookie); DumpTable(TBL_NestedClass, "NestedClass", GUICookie); // Rest of the tables. pITables->GetNumTables(&count); for (i=0; i<count; ++i) { if (!IsTableSeen(i)) DumpTable(i, NULL, GUICookie); } // String heap pITables->GetStringHeapSize(&sizeRec); if (sizeRec > 0) { metaSize += sizeRec; WritePerfDataInt("Strings", "Strings", "bytes", "bytes",sizeRec); sprintf_s(szString,SZSTRING_SIZE,"// Strings - %5d bytes", sizeRec); printLine(GUICookie,szStr); } // Blob heap pITables->GetBlobHeapSize(&sizeRec); if (sizeRec > 0) { metaSize += sizeRec; WritePerfDataInt("Blobs", "Blobs", "bytes", "bytes",sizeRec); sprintf_s(szString,SZSTRING_SIZE,"// Blobs - %5d bytes", sizeRec); printLine(GUICookie,szStr); } // User String Heap pITables->GetUserStringHeapSize(&sizeRec); if (sizeRec > 0) { metaSize += sizeRec; WritePerfDataInt("UserStrings", "UserStrings", "bytes", "bytes",sizeRec); sprintf_s(szString,SZSTRING_SIZE,"// UserStrings - %5d bytes", sizeRec); printLine(GUICookie,szStr); } // Guid heap pITables->GetGuidHeapSize(&sizeRec); if (sizeRec > 0) { metaSize += sizeRec; WritePerfDataInt("Guids", "Guids", "bytes", "bytes", sizeRec); sprintf_s(szString,SZSTRING_SIZE,"// Guids - %5d bytes", sizeRec); printLine(GUICookie,szStr); } if (g_cbMetaData - metaSize > 0) { WritePerfDataInt("Uncategorized", "Uncategorized", "bytes", "bytes",g_cbMetaData - metaSize); sprintf_s(szString,SZSTRING_SIZE,"// Uncategorized - %5d bytes", g_cbMetaData - metaSize); printLine(GUICookie,szStr); } if (miscCOMPlusSize != 0) { WritePerfDataInt("CLR additional info", "CLR additional info", "bytes", "bytes", miscCOMPlusSize); sprintf_s(szString,SZSTRING_SIZE,"// CLR additional info : %d", miscCOMPlusSize); printLine(GUICookie,""); printLine(GUICookie,szStr); if (CORHeader->CodeManagerTable.Size != 0) { WritePerfDataInt("CodeManagerTable", "CodeManagerTable", "bytes", "bytes", VAL32(CORHeader->CodeManagerTable.Size)); sprintf_s(szString,SZSTRING_SIZE,"// CodeManagerTable - %d", VAL32(CORHeader->CodeManagerTable.Size)); printLine(GUICookie,szStr); } if (CORHeader->VTableFixups.Size != 0) { WritePerfDataInt("VTableFixups", "VTableFixups", "bytes", "bytes", VAL32(CORHeader->VTableFixups.Size)); sprintf_s(szString,SZSTRING_SIZE,"// VTableFixups - %d", VAL32(CORHeader->VTableFixups.Size)); printLine(GUICookie,szStr); } if (CORHeader->Resources.Size != 0) { WritePerfDataInt("Resources", "Resources", "bytes", "bytes", VAL32(CORHeader->Resources.Size)); 
sprintf_s(szString,SZSTRING_SIZE,"// Resources - %d", VAL32(CORHeader->Resources.Size)); printLine(GUICookie,szStr); } } WritePerfDataInt("CLR method headers", "CLR method headers", "count", "count", methodHeaderSize); sprintf_s(szString,SZSTRING_SIZE,"// CLR method headers : %d", methodHeaderSize); printLine(GUICookie,""); printLine(GUICookie,szStr); WritePerfDataInt("Num.of method bodies", "Num.of method bodies", "count", "count",methodBodies); sprintf_s(szString,SZSTRING_SIZE,"// Num.of method bodies - %d", methodBodies); printLine(GUICookie,szStr); WritePerfDataInt("Num.of fat headers", "Num.of fat headers", "count", "count", fatHeaders); sprintf_s(szString,SZSTRING_SIZE,"// Num.of fat headers - %d", fatHeaders); printLine(GUICookie,szStr); WritePerfDataInt("Num.of tiny headers", "Num.of tiny headers", "count", "count", tinyHeaders); sprintf_s(szString,SZSTRING_SIZE,"// Num.of tiny headers - %d", tinyHeaders); printLine(GUICookie,szStr); if (deprecatedHeaders > 0) { WritePerfDataInt("Num.of old headers", "Num.of old headers", "count", "count", deprecatedHeaders); sprintf_s(szString,SZSTRING_SIZE,"// Num.of old headers - %d", deprecatedHeaders); printLine(GUICookie,szStr); } if (fatSections != 0 || smallSections != 0) { WritePerfDataInt("Num.of fat sections", "Num.of fat sections", "count", "count", fatSections); sprintf_s(szString,SZSTRING_SIZE,"// Num.of fat sections - %d", fatSections); printLine(GUICookie,szStr); WritePerfDataInt("Num.of small section", "Num.of small section", "count", "count", smallSections); sprintf_s(szString,SZSTRING_SIZE,"// Num.of small sections - %d", smallSections); printLine(GUICookie,szStr); } WritePerfDataInt("Managed code", "Managed code", "bytes", "bytes", methodBodySize); sprintf_s(szString,SZSTRING_SIZE,"// Managed code : %d", methodBodySize); printLine(GUICookie,""); printLine(GUICookie,szStr); if (methodBodies != 0) { WritePerfDataInt("Ave method size", "Ave method size", "bytes", "bytes", methodBodySize / methodBodies); sprintf_s(szString,SZSTRING_SIZE,"// Ave method size - %d", methodBodySize / methodBodies); printLine(GUICookie,szStr); } if (pITables) pITables->Release(); if(g_fDumpToPerfWriter) CloseHandle((char*) g_PerfDataFilePtr); } #ifdef _PREFAST_ #pragma warning(pop) #endif void DumpHexbytes(__inout __nullterminated char* szptr,BYTE *pb, DWORD fromPtr, DWORD toPtr, DWORD limPtr) { char sz[256]; int k = 0,i; DWORD curPtr = 0; bool printsz = FALSE; BYTE zero = 0; *szptr = 0; for(i = 0,k = 0,curPtr=fromPtr; curPtr < toPtr; i++,k++,curPtr++,pb++) { if(k == 16) { if(printsz) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT(" // %s"),sz); printLine(g_pFile,szString); szptr = &szString[0]; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s ",g_szAsmCodeIndent); k = 0; printsz = FALSE; } if(curPtr >= limPtr) pb = &zero; // at limPtr and after, pad with 0 else { PAL_CPP_TRY { sz[k] = *pb; // check the ptr validity } PAL_CPP_CATCH_ALL { pb = &zero; } PAL_CPP_ENDTRY; } szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," %2.2X", *pb); if(isprint(*pb)) { if(g_fDumpRTF) { if((*pb == '\\')||(*pb=='{')||(*pb=='}')) sz[k++]='\\'; sz[k] = *pb; } else if(g_fDumpHTML) { if(*pb == '<') { sz[k] = 0; strcat_s(sz,256-k,LTN()); k+=(int)(strlen(LTN())); } else if(*pb == '>') { sz[k] = 0; strcat_s(sz,256-k,GTN()); k+=(int)(strlen(GTN())); } } else sz[k] = *pb; printsz = TRUE; } else { sz[k] = '.'; } sz[k+1] = 0; } szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),") "); if(printsz) { for(i = k; i < 16; i++) 
szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," "); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("// %s"),sz); } printLine(g_pFile,szString); } struct VTableEntry { DWORD dwAddr; WORD wCount; WORD wType; }; struct ExpDirTable { DWORD dwFlags; DWORD dwDateTime; WORD wVMajor; WORD wVMinor; DWORD dwNameRVA; DWORD dwOrdinalBase; DWORD dwNumATEntries; DWORD dwNumNamePtrs; DWORD dwAddrTableRVA; DWORD dwNamePtrRVA; DWORD dwOrdTableRVA; }; void DumpEATEntries(void* GUICookie, IMAGE_NT_HEADERS32 *pNTHeader32, IMAGE_OPTIONAL_HEADER32 *pOptHeader32, IMAGE_NT_HEADERS64 *pNTHeader64, IMAGE_OPTIONAL_HEADER64 *pOptHeader64) { IMAGE_DATA_DIRECTORY *pExportDir = NULL; IMAGE_SECTION_HEADER *pSecHdr = NULL; DWORD i,j,N; BOOL bpOpt = FALSE; if (g_pPELoader->IsPE32()) { pExportDir = pOptHeader32->DataDirectory; pSecHdr = IMAGE_FIRST_SECTION(pNTHeader32); N = VAL16(pNTHeader32->FileHeader.NumberOfSections); if (pOptHeader32->NumberOfRvaAndSizes) bpOpt = TRUE; } else { pExportDir = pOptHeader64->DataDirectory; pSecHdr = IMAGE_FIRST_SECTION(pNTHeader64); N = VAL16(pNTHeader64->FileHeader.NumberOfSections); if (pOptHeader64->NumberOfRvaAndSizes) bpOpt = TRUE; } if(bpOpt) { ExpDirTable *pExpTable = NULL; if(pExportDir->Size) { #ifdef _DEBUG printLine(GUICookie,COMMENT((char*)0)); // start multiline comment sprintf_s(szString,SZSTRING_SIZE,"// Export dir VA=%X size=%X ",VAL32(pExportDir->VirtualAddress),VAL32(pExportDir->Size)); printLine(GUICookie,szString); #endif DWORD vaExpTable = VAL32(pExportDir->VirtualAddress); for (i=0; i < N; i++,pSecHdr++) { if((vaExpTable >= VAL32(pSecHdr->VirtualAddress))&& (vaExpTable < VAL32(pSecHdr->VirtualAddress)+VAL32(pSecHdr->Misc.VirtualSize))) { pExpTable = (ExpDirTable*)( g_pPELoader->base() + VAL32(pSecHdr->PointerToRawData) + vaExpTable - VAL32(pSecHdr->VirtualAddress)); #ifdef _DEBUG sprintf_s(szString,SZSTRING_SIZE,"// in section '%s': VA=%X Misc.VS=%X PRD=%X ",(char*)(pSecHdr->Name), VAL32(pSecHdr->VirtualAddress),VAL32(pSecHdr->Misc.VirtualSize),VAL32(pSecHdr->PointerToRawData)); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"// Export Directory Table:"); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"// dwFlags = %X",VAL32(pExpTable->dwFlags)); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"// dwDateTime = %X",VAL32(pExpTable->dwDateTime)); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"// wVMajor = %X",VAL16(pExpTable->wVMajor)); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"// wVMinor = %X",VAL16(pExpTable->wVMinor)); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"// dwNameRVA = %X",VAL32(pExpTable->dwNameRVA)); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"// dwOrdinalBase = %X",VAL32(pExpTable->dwOrdinalBase)); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"// dwNumATEntries = %X",VAL32(pExpTable->dwNumATEntries)); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"// dwNumNamePtrs = %X",VAL32(pExpTable->dwNumNamePtrs)); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"// dwAddrTableRVA = %X",VAL32(pExpTable->dwAddrTableRVA)); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"// dwNamePtrRVA = %X",VAL32(pExpTable->dwNamePtrRVA)); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"// dwOrdTableRVA = %X",VAL32(pExpTable->dwOrdTableRVA)); printLine(GUICookie,szString); if(pExpTable->dwNameRVA) { char* szName; 
if(g_pPELoader->getVAforRVA(VAL32(pExpTable->dwNameRVA), (void **) &szName)) sprintf_s(szString,SZSTRING_SIZE,"// DLL Name: '%s'",szName); else sprintf_s(szString,SZSTRING_SIZE,"// DLL Name: BAD RVA: 0x%8.8X",VAL32(pExpTable->dwNameRVA)); printLine(GUICookie,szString); } #endif if(pExpTable->dwNumATEntries && pExpTable->dwAddrTableRVA) { DWORD* pExpAddr = NULL; BYTE *pCont = NULL; DWORD dwTokRVA; mdToken* pTok; g_pPELoader->getVAforRVA(VAL32(pExpTable->dwAddrTableRVA), (void **) &pExpAddr); #ifdef _DEBUG sprintf_s(szString,SZSTRING_SIZE,"// Export Address Table:"); printLine(GUICookie,szString); #endif g_nEATableRef = VAL32(pExpTable->dwNumATEntries); if (g_prEATableRef == NULL) { g_prEATableRef = new DynamicArray<EATableRef>; } (*g_prEATableRef)[g_nEATableRef].tkTok = 0; // to avoid multiple reallocations of DynamicArray for(j=0; j < VAL32(pExpTable->dwNumATEntries); j++,pExpAddr++) { g_pPELoader->getVAforRVA(VAL32(*pExpAddr), (void **) &pCont); #ifdef _DEBUG sprintf_s(szString,SZSTRING_SIZE,"// [%d]: RVA=%X VA=%p(",j,VAL32(*pExpAddr),pCont); DumpByteArray(szString,pCont,16,GUICookie); printLine(GUICookie,szString); #endif (*g_prEATableRef)[j].tkTok = 0; if(g_pPELoader->IsPE32()) { dwTokRVA = VAL32(*((DWORD*)(pCont+2))); // first two bytes - JumpIndirect (0x25FF) dwTokRVA -= VAL32((DWORD)pOptHeader32->ImageBase); } else { ULONGLONG ullTokRVA; if(pNTHeader64->FileHeader.Machine == IMAGE_FILE_MACHINE_IA64) ullTokRVA = VAL64(*((ULONGLONG*)(pCont+8))); else ullTokRVA = VAL64(*((ULONGLONG*)(pCont+2))); dwTokRVA =(DWORD)(ullTokRVA - VAL64((DWORD)pOptHeader64->ImageBase)); } if(g_pPELoader->getVAforRVA(dwTokRVA,(void**)&pTok)) (*g_prEATableRef)[j].tkTok = VAL32(*pTok); (*g_prEATableRef)[j].pszName = NULL; } } if(pExpTable->dwNumNamePtrs && pExpTable->dwNamePtrRVA && pExpTable->dwOrdTableRVA) { DWORD* pNamePtr = NULL; WORD* pOrd = NULL; char* szName = NULL; g_pPELoader->getVAforRVA(VAL32(pExpTable->dwNamePtrRVA), (void **) &pNamePtr); g_pPELoader->getVAforRVA(VAL32(pExpTable->dwOrdTableRVA), (void **) &pOrd); #ifdef _DEBUG sprintf_s(szString,SZSTRING_SIZE,"// Export Names:"); printLine(GUICookie,szString); #endif for(j=0; j < VAL32(pExpTable->dwNumATEntries); j++,pNamePtr++,pOrd++) { g_pPELoader->getVAforRVA(VAL32(*pNamePtr), (void **) &szName); #ifdef _DEBUG sprintf_s(szString,SZSTRING_SIZE,"// [%d]: NamePtr=%X Ord=%X Name='%s'",j,VAL32(*pNamePtr),*pOrd,szName); printLine(GUICookie,szString); #endif (*g_prEATableRef)[VAL16(*pOrd)].pszName = szName; } } g_nEATableBase = pExpTable->dwOrdinalBase; break; } } #ifdef _DEBUG printLine(GUICookie,COMMENT((char*)-1)); // end multiline comment #endif } } } // helper to avoid mixing of SEH and stack objects with destructors void DumpEATEntriesWrapper(void* GUICookie, IMAGE_NT_HEADERS32 *pNTHeader32, IMAGE_OPTIONAL_HEADER32 *pOptHeader32, IMAGE_NT_HEADERS64 *pNTHeader64, IMAGE_OPTIONAL_HEADER64 *pOptHeader64) { PAL_CPP_TRY { DumpEATEntries(GUICookie, pNTHeader32, pOptHeader32, pNTHeader64, pOptHeader64); } PAL_CPP_CATCH_ALL { printError(GUICookie,"// ERROR READING EXPORT ADDRESS TABLE"); if (g_prEATableRef != NULL) { SDELETE(g_prEATableRef); } g_nEATableRef = 0; } PAL_CPP_ENDTRY } void DumpVtable(void* GUICookie) { // VTable : primary processing DWORD pVTable=0; VTableEntry* pVTE; DWORD i,j,k; char* szptr; IMAGE_NT_HEADERS32 *pNTHeader32 = NULL; IMAGE_OPTIONAL_HEADER32 *pOptHeader32 = NULL; IMAGE_NT_HEADERS64 *pNTHeader64 = NULL; IMAGE_OPTIONAL_HEADER64 *pOptHeader64 = NULL; if (g_pPELoader->IsPE32()) { pNTHeader32 = g_pPELoader->ntHeaders32(); pOptHeader32 
= &pNTHeader32->OptionalHeader; sprintf_s(szString,SZSTRING_SIZE,"%s%s 0x%08x", g_szAsmCodeIndent,KEYWORD(".imagebase"),VAL32(pOptHeader32->ImageBase)); printLine(GUICookie,szString); j = VAL16(pOptHeader32->Subsystem); sprintf_s(szString,SZSTRING_SIZE,"%s%s 0x%08x", g_szAsmCodeIndent,KEYWORD(".file alignment"),VAL32(pOptHeader32->FileAlignment)); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"%s%s 0x%08x", g_szAsmCodeIndent,KEYWORD(".stackreserve"),VAL32(pOptHeader32->SizeOfStackReserve)); printLine(GUICookie,szString); } else { pNTHeader64 = g_pPELoader->ntHeaders64(); pOptHeader64 = &pNTHeader64->OptionalHeader; sprintf_s(szString,SZSTRING_SIZE,"%s%s 0x%016I64x", g_szAsmCodeIndent,KEYWORD(".imagebase"),VAL64(pOptHeader64->ImageBase)); printLine(GUICookie,szString); j = VAL16(pOptHeader64->Subsystem); sprintf_s(szString,SZSTRING_SIZE,"%s%s 0x%08x", g_szAsmCodeIndent,KEYWORD(".file alignment"),VAL32(pOptHeader64->FileAlignment)); printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"%s%s 0x%016I64x", g_szAsmCodeIndent,KEYWORD(".stackreserve"),VAL64(pOptHeader64->SizeOfStackReserve)); printLine(GUICookie,szString); } szptr = &szString[0]; szptr += sprintf_s(szString,SZSTRING_SIZE,"%s%s 0x%04x", g_szAsmCodeIndent,KEYWORD(".subsystem"),j); { const char* psz[15] = {"// UNKNOWN", "// NATIVE", "// WINDOWS_GUI", "// WINDOWS_CUI", "// <illegal value>", "// OS2_CUI", "// <illegal value>", "// POSIX_CUI", "// NATIVE_WINDOWS", "// WINDOWS_CE_GUI", "// EFI_APPLICATION", "// EFI_BOOT_SERVICE_DRIVER", "// EFI_RUNTIME_DRIVER", "// EFI_ROM", "// XBOX" }; if(j > 14) j = 4; // <illegal value> sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," %s",COMMENT(psz[j])); } printLine(GUICookie,szString); szptr = &szString[0]; i = (DWORD)VAL32(g_CORHeader->Flags); szptr += sprintf_s(szString,SZSTRING_SIZE,"%s%s 0x%08x", g_szAsmCodeIndent,KEYWORD(".corflags"),i); if(i != 0) { char sz[256], *szp = sz; szp += sprintf_s(szp,256," // "); if(i & COMIMAGE_FLAGS_ILONLY) szp += sprintf_s(szp,256-(szp-sz)," ILONLY"); if(COR_IS_32BIT_REQUIRED(i)) szp += sprintf_s(szp,256-(szp-sz)," 32BITREQUIRED"); if(COR_IS_32BIT_PREFERRED(i)) szp += sprintf_s(szp,256-(szp-sz)," 32BITPREFERRED"); if(i & COMIMAGE_FLAGS_IL_LIBRARY) szp += sprintf_s(szp,256-(szp-sz)," IL_LIBRARY"); if(i & COMIMAGE_FLAGS_TRACKDEBUGDATA) szp += sprintf_s(szp,256-(szp-sz)," TRACKDEBUGDATA"); szptr += sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT(sz)); } printLine(GUICookie,szString); sprintf_s(szString,SZSTRING_SIZE,"%s// Image base: 0x%p",g_szAsmCodeIndent,g_pPELoader->base()); printLine(GUICookie,COMMENT(szString)); DumpEATEntriesWrapper(GUICookie, pNTHeader32, pOptHeader32, pNTHeader64, pOptHeader64); g_nVTableRef = 0; if(VAL32(g_CORHeader->VTableFixups.Size)) { IMAGE_SECTION_HEADER *pSecHdr = NULL; DWORD dwNumberOfSections; if (g_pPELoader->IsPE32()) { pSecHdr = IMAGE_FIRST_SECTION(g_pPELoader->ntHeaders32()); dwNumberOfSections = VAL16(g_pPELoader->ntHeaders32()->FileHeader.NumberOfSections); } else { pSecHdr = IMAGE_FIRST_SECTION(g_pPELoader->ntHeaders64()); dwNumberOfSections = VAL16(g_pPELoader->ntHeaders64()->FileHeader.NumberOfSections); } pVTable = VAL32(g_CORHeader->VTableFixups.VirtualAddress); for (i=0; i < dwNumberOfSections; i++,pSecHdr++) { if(((DWORD)pVTable >= VAL32(pSecHdr->VirtualAddress))&& ((DWORD)pVTable < VAL32(pSecHdr->VirtualAddress)+VAL32(pSecHdr->Misc.VirtualSize))) { pVTE = (VTableEntry*)( g_pPELoader->base() + VAL32(pSecHdr->PointerToRawData) + pVTable - VAL32(pSecHdr->VirtualAddress)); 
for(j=VAL32(g_CORHeader->VTableFixups.Size),k=0; j > 0; pVTE++, j-=sizeof(VTableEntry),k++) { szptr = &szString[0]; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s [%d] ",g_szAsmCodeIndent,KEYWORD(".vtfixup"),VAL16(pVTE->wCount)); DWORD dwSize = VAL16(pVTE->wCount) * 4; WORD wType = VAL16(pVTE->wType); if(wType & COR_VTABLE_32BIT) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("int32 ")); else if(wType & COR_VTABLE_64BIT) { szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("int64 ")); dwSize <<= 1; } if(wType & COR_VTABLE_FROM_UNMANAGED) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("fromunmanaged ")); if(wType & COR_VTABLE_CALL_MOST_DERIVED) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("callmostderived ")); if(wType & 0x8 /*COR_VTABLE_FROM_UNMANAGED_RETAIN_APPDOMAIN*/) szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("retainappdomain ")); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("at ")); szptr = DumpDataPtr(szptr,VAL32(pVTE->dwAddr), dwSize); // Walk every v-table fixup entry and dump the slots. { BYTE *pSlot; if (g_pPELoader->getVAforRVA(VAL32(pVTE->dwAddr), (void **) &pSlot)) { char* szptr0 = szptr; szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," //"); for (WORD iSlot=0; iSlot<VAL16(pVTE->wCount); iSlot++) { mdMethodDef tkMethod = VAL32(*(DWORD *) pSlot); if (VAL16(pVTE->wType) & COR_VTABLE_32BIT) { szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," %08X", VAL32(*(DWORD *)pSlot)); pSlot += sizeof(DWORD); } else { szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," %016I64X", VAL64(*(unsigned __int64 *)pSlot)); pSlot += sizeof(unsigned __int64); } if (g_prVTableRef == NULL) { g_prVTableRef = new DynamicArray<VTableRef>; } (*g_prVTableRef)[g_nVTableRef].tkTok = tkMethod; (*g_prVTableRef)[g_nVTableRef].wEntry = (WORD)k; (*g_prVTableRef)[g_nVTableRef].wSlot = iSlot; g_nVTableRef++; //ValidateToken(tkMethod, mdtMethodDef); } sprintf_s(szptr0,SZSTRING_REMAINING_SIZE(szptr0),COMMENT(szptr0)); } else szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr)," %s",ERRORMSG(RstrUTF(IDS_E_BOGUSRVA))); } printLine(GUICookie,szString); } break; } } } } // MetaInfo integration: void DumpMI(_In_ __nullterminated const char *str) { static BOOL fInit = TRUE; static char* szStr = &szString[0]; static void* GUICookie; char* pch; // Reset if(str == (char*)-1) { fInit = TRUE; return; } // Init if(fInit) { strcpy_s(szString,5,"// "); fInit = FALSE; GUICookie = (void*)str; return; } // Normal work strcat_s(szString,SZSTRING_SIZE,str); if((pch = strchr(szStr,'\n'))) { *pch = 0; printLine(GUICookie,szStr); pch++; memcpy(&szString[3], pch, strlen(pch)+1); } } void DumpMetaInfo(_In_ __nullterminated const WCHAR* pwzFileName, _In_opt_z_ const char* pszObjFileName, void* GUICookie) { const WCHAR* pch = wcsrchr(pwzFileName,L'.'); DumpMI((char*)GUICookie); // initialize the print function for DumpMetaInfo if(pch && (!_wcsicmp(pch+1,W("lib")) || !_wcsicmp(pch+1,W("obj")))) { // This works only when all the rest does not // Init and run. 
if (MetaDataGetDispenser(CLSID_CorMetaDataDispenser, IID_IMetaDataDispenserEx, (void **)&g_pDisp)) { WCHAR *pwzObjFileName=NULL; if (pszObjFileName) { int nLength = (int) strlen(pszObjFileName)+1; pwzObjFileName = new WCHAR[nLength]; memset(pwzObjFileName,0,sizeof(WCHAR)*nLength); WszMultiByteToWideChar(CP_UTF8,0,pszObjFileName,-1,pwzObjFileName,nLength); } DisplayFile((WCHAR*)pwzFileName, true, g_ulMetaInfoFilter, pwzObjFileName, DumpMI); g_pDisp->Release(); g_pDisp = NULL; if (pwzObjFileName) VDELETE(pwzObjFileName); } } else { HRESULT hr = S_OK; if(g_pDisp == NULL) { hr = MetaDataGetDispenser(CLSID_CorMetaDataDispenser, IID_IMetaDataDispenserEx, (void **)&g_pDisp); } if(SUCCEEDED(hr)) { g_ValModuleType = ValidatorModuleTypePE; if(g_pAssemblyImport==NULL) g_pAssemblyImport = GetAssemblyImport(NULL); printLine(GUICookie,RstrUTF(IDS_E_MISTART)); //MDInfo metaDataInfo(g_pPubImport, g_pAssemblyImport, (LPCWSTR)pwzFileName, DumpMI, g_ulMetaInfoFilter); MDInfo metaDataInfo(g_pDisp,(LPCWSTR)pwzFileName, DumpMI, g_ulMetaInfoFilter); metaDataInfo.DisplayMD(); printLine(GUICookie,RstrUTF(IDS_E_MIEND)); } } DumpMI((char*)-1); // reset the print function for DumpMetaInfo } void DumpPreamble() { printLine(g_pFile,""); if(g_fDumpHTML) { printLine(g_pFile, "<FONT SIZE=4><B>"); } else if(g_fDumpRTF) { } sprintf_s(szString,SZSTRING_SIZE,"// Microsoft (R) .NET IL Disassembler. Version " CLR_PRODUCT_VERSION); printLine(g_pFile,COMMENT(szString)); if(g_fDumpHTML) { printLine(g_pFile, "</B></FONT>"); } else if(g_fDumpRTF) { } printLine(g_pFile,""); if(g_fLimitedVisibility || (!g_fShowCA) || (!g_fDumpAsmCode) || (g_Mode & (MODE_DUMP_CLASS | MODE_DUMP_CLASS_METHOD | MODE_DUMP_CLASS_METHOD_SIG))) { printLine(g_pFile,""); printLine(g_pFile,COMMENT(RstrUTF(IDS_E_PARTDASM))); printLine(g_pFile,""); } if(g_fLimitedVisibility) { strcpy_s(szString, SZSTRING_SIZE, RstrUTF(IDS_E_ONLYITEMS)); if(!g_fHidePub) strcat_s(szString, SZSTRING_SIZE," Public"); if(!g_fHidePriv) strcat_s(szString, SZSTRING_SIZE," Private"); if(!g_fHideFam) strcat_s(szString, SZSTRING_SIZE," Family"); if(!g_fHideAsm) strcat_s(szString, SZSTRING_SIZE," Assembly"); if(!g_fHideFAA) strcat_s(szString, SZSTRING_SIZE," FamilyANDAssembly"); if(!g_fHidePrivScope) strcat_s(szString, SZSTRING_SIZE," PrivateScope"); printLine(g_pFile,COMMENT(szString)); } } void DumpSummary() { ULONG i; const char *pcClass,*pcNS,*pcMember, *pcSig; char szFQN[4096]; HENUMInternal hEnum; mdToken tkMember; CQuickBytes qbMemberSig; PCCOR_SIGNATURE pComSig; ULONG cComSig; DWORD dwAttrs; mdToken tkEventType; printLine(g_pFile,"//============ S U M M A R Y ================================="); if (SUCCEEDED(g_pImport->EnumGlobalFunctionsInit(&hEnum))) { while(g_pImport->EnumNext(&hEnum, &tkMember)) { if (FAILED(g_pImport->GetNameOfMethodDef(tkMember, &pcMember)) || FAILED(g_pImport->GetSigOfMethodDef(tkMember, &cComSig, &pComSig))) { sprintf_s(szString, SZSTRING_SIZE, "// ERROR in the method record %08X", tkMember); printLine(g_pFile, szString); continue; } qbMemberSig.Shrink(0); pcSig = cComSig ? 
PrettyPrintSig(pComSig, cComSig, "", &qbMemberSig, g_pImport,NULL) : "NO SIGNATURE"; PREFIX_ASSUME(ProperName((char*)pcMember) != 0); sprintf_s(szString,SZSTRING_SIZE,"// %08X [GLM] %s : %s", tkMember,ProperName((char*)pcMember),pcSig); printLine(g_pFile,szString); } } g_pImport->EnumClose(&hEnum); if (SUCCEEDED(g_pImport->EnumGlobalFieldsInit(&hEnum))) { while(g_pImport->EnumNext(&hEnum, &tkMember)) { if (FAILED(g_pImport->GetNameOfFieldDef(tkMember, &pcMember)) || FAILED(g_pImport->GetSigOfFieldDef(tkMember, &cComSig, &pComSig))) { sprintf_s(szString, SZSTRING_SIZE, "// ERROR in the field record %08X", tkMember); printLine(g_pFile, szString); continue; } qbMemberSig.Shrink(0); pcSig = cComSig ? PrettyPrintSig(pComSig, cComSig, "", &qbMemberSig, g_pImport,NULL) : "NO SIGNATURE"; PREFIX_ASSUME(ProperName((char*)pcMember) != 0); sprintf_s(szString,SZSTRING_SIZE,"// %08X [GLF] %s : %s", tkMember,ProperName((char*)pcMember),pcSig); printLine(g_pFile,szString); } } g_pImport->EnumClose(&hEnum); for (i = 0; i < g_NumClasses; i++) { if (FAILED(g_pImport->GetNameOfTypeDef(g_cl_list[i], &pcClass, &pcNS))) { sprintf_s(szString, SZSTRING_SIZE, "// ERROR in the TypeDef record %08X", g_cl_list[i]); printLine(g_pFile, szString); continue; } PREFIX_ASSUME(ProperName((char*)pcClass) != 0); if(*pcNS) sprintf_s(szFQN,4096,"%s.%s", ProperName((char*)pcNS),ProperName((char*)pcClass)); else strcpy_s(szFQN,4096,ProperName((char*)pcClass)); sprintf_s(szString,SZSTRING_SIZE,"// %08X [CLS] %s", g_cl_list[i],szFQN); printLine(g_pFile,szString); if(SUCCEEDED(g_pImport->EnumInit(mdtMethodDef, g_cl_list[i], &hEnum))) { while(g_pImport->EnumNext(&hEnum, &tkMember)) { if (FAILED(g_pImport->GetNameOfMethodDef(tkMember, &pcMember)) || FAILED(g_pImport->GetSigOfMethodDef(tkMember, &cComSig, &pComSig))) { sprintf_s(szString, SZSTRING_SIZE, "// ERROR in the method record %08X", tkMember); printLine(g_pFile, szString); continue; } qbMemberSig.Shrink(0); pcSig = cComSig ? PrettyPrintSig(pComSig, cComSig, "", &qbMemberSig, g_pImport,NULL) : "NO SIGNATURE"; PREFIX_ASSUME(ProperName((char*)pcMember) != 0); sprintf_s(szString,SZSTRING_SIZE,"// %08X [MET] %s::%s : %s", tkMember,szFQN,ProperName((char*)pcMember),pcSig); printLine(g_pFile,szString); } } g_pImport->EnumClose(&hEnum); if(SUCCEEDED(g_pImport->EnumInit(mdtFieldDef, g_cl_list[i], &hEnum))) { while(g_pImport->EnumNext(&hEnum, &tkMember)) { if (FAILED(g_pImport->GetNameOfFieldDef(tkMember, &pcMember)) || FAILED(g_pImport->GetSigOfFieldDef(tkMember, &cComSig, &pComSig))) { sprintf_s(szString, SZSTRING_SIZE, "// ERROR in the field record %08X", tkMember); printLine(g_pFile, szString); continue; } qbMemberSig.Shrink(0); pcSig = cComSig ? 
PrettyPrintSig(pComSig, cComSig, "", &qbMemberSig, g_pImport,NULL) : "NO SIGNATURE"; PREFIX_ASSUME(ProperName((char*)pcMember) != 0); sprintf_s(szString,SZSTRING_SIZE,"// %08X [FLD] %s::%s : %s", tkMember,szFQN,ProperName((char*)pcMember),pcSig); printLine(g_pFile,szString); } } g_pImport->EnumClose(&hEnum); if(SUCCEEDED(g_pImport->EnumInit(mdtEvent, g_cl_list[i], &hEnum))) { while(g_pImport->EnumNext(&hEnum, &tkMember)) { if (FAILED(g_pImport->GetEventProps(tkMember,&pcMember,&dwAttrs,&tkEventType))) { sprintf_s(szString, SZSTRING_SIZE, "// Invalid Event %08X record", tkMember); printLine(g_pFile, szString); continue; } qbMemberSig.Shrink(0); pcSig = "NO TYPE"; if(RidFromToken(tkEventType)) { switch(TypeFromToken(tkEventType)) { case mdtTypeRef: case mdtTypeDef: case mdtTypeSpec: pcSig = PrettyPrintClass(&qbMemberSig,tkEventType,g_pImport); break; default: break; } } PREFIX_ASSUME(ProperName((char*)pcMember) != 0); sprintf_s(szString,SZSTRING_SIZE,"// %08X [EVT] %s::%s : %s", tkMember,szFQN,ProperName((char*)pcMember),pcSig); printLine(g_pFile,szString); } } g_pImport->EnumClose(&hEnum); if(SUCCEEDED(g_pImport->EnumInit(mdtProperty, g_cl_list[i], &hEnum))) { while(g_pImport->EnumNext(&hEnum, &tkMember)) { if (FAILED(g_pImport->GetPropertyProps(tkMember,&pcMember,&dwAttrs,&pComSig,&cComSig))) { sprintf_s(szString, SZSTRING_SIZE, "// Invalid Property %08X record", tkMember); printLine(g_pFile, szString); continue; } qbMemberSig.Shrink(0); pcSig = cComSig ? PrettyPrintSig(pComSig, cComSig, "", &qbMemberSig, g_pImport,NULL) : "NO SIGNATURE"; PREFIX_ASSUME(ProperName((char*)pcMember) != 0); sprintf_s(szString,SZSTRING_SIZE,"// %08X [PRO] %s::%s : %s", tkMember,szFQN,ProperName((char*)pcMember),pcSig); printLine(g_pFile,szString); } } g_pImport->EnumClose(&hEnum); } printLine(g_pFile,"//=============== END SUMMARY =================================="); } void DumpRTFPrefix(void* GUICookie,BOOL fFontDefault) { g_fDumpRTF = FALSE; printLine(GUICookie,"{\\rtf1\\ansi"); if(fFontDefault) printLine(GUICookie,"{\\fonttbl{\\f0\\fmodern\\fprq1\\fcharset1 Courier New;}{\\f1\\fswiss\\fcharset1 Arial;}}"); printLine(GUICookie,"{\\colortbl ;\\red0\\green0\\blue128;\\red0\\green128\\blue0;\\red255\\green0\\blue0;}"); printLine(GUICookie,"\\viewkind4\\uc1\\pard\\f0\\fs20"); g_fDumpRTF = TRUE; } void DumpRTFPostfix(void* GUICookie) { g_fDumpRTF = FALSE; printLine(GUICookie,"}"); g_fDumpRTF = TRUE; } mdToken ClassOf(mdToken tok) { mdToken retval=0; switch(TypeFromToken(tok)) { case mdtTypeDef: case mdtTypeRef: case mdtTypeSpec: retval = tok; break; case mdtFieldDef: case mdtMethodDef: case mdtMemberRef: if (FAILED(g_pImport->GetParentToken(tok, &retval))) { retval = mdTokenNil; } else { retval = ClassOf(retval); } break; default: break; } return retval; } void DumpRefs(BOOL fClassesOnly) { CQuickBytes out; DynamicArray<TokPair> *refs = g_refs; TokPair *newrefs = NULL; mdToken tkThisUser,tkThisRef; mdToken tkLastUser = 0xFFFFFFFF, tkLastRef=0xFFFFFFFF; DWORD i=0,j=0; g_refs = NULL; printLine(g_pFile,COMMENT((char*)0)); printLine(g_pFile,"//============ R E F E R E N C E S ==========================="); strcpy_s(g_szAsmCodeIndent,MAX_MEMBER_LENGTH,"// "); if(fClassesOnly && g_NumRefs) { if((newrefs = new TokPair[g_NumRefs])) { for(i=0; i<g_NumRefs; i++) { newrefs[i].tkUser = tkThisUser = ClassOf((*refs)[i].tkUser); newrefs[i].tkRef = tkThisRef = ClassOf((*refs)[i].tkRef); if(!tkThisUser) continue; if(!tkThisRef) continue; if(tkThisUser == tkThisRef) continue; for(j = 0; j<i; j++) { 
if((newrefs[j].tkUser==tkThisUser)&&(newrefs[j].tkRef==tkThisRef)) { newrefs[i].tkRef = 0; break; } } } } else fClassesOnly = FALSE; } for(i = 0; i <g_NumRefs; i++) { if(fClassesOnly) { tkThisUser = newrefs[i].tkUser; tkThisRef = newrefs[i].tkRef; } else { tkThisUser = (*refs)[i].tkUser; tkThisRef = (*refs)[i].tkRef; } if(!tkThisUser) continue; if(!tkThisRef) continue; if(tkThisUser == tkThisRef) continue; if((tkThisUser==tkLastUser)&&(tkThisRef==tkLastRef)) continue; strcpy_s(szString, SZSTRING_SIZE,g_szAsmCodeIndent); if(tkThisUser != tkLastUser) { PrettyPrintToken(szString, tkThisUser, g_pImport,g_pFile,0); //TypeDef,TypeRef,TypeSpec,MethodDef,FieldDef,MemberRef,MethodSpec,String strcat_s(szString, SZSTRING_SIZE, " references "); printLine(g_pFile,szString); tkLastUser = tkThisUser; } strcpy_s(szString, SZSTRING_SIZE,g_szAsmCodeIndent); strcat_s(szString, SZSTRING_SIZE," - "); PrettyPrintToken(szString, tkThisRef, g_pImport,g_pFile,0); //TypeDef,TypeRef,TypeSpec,MethodDef,FieldDef,MemberRef,MethodSpec,String printLine(g_pFile,szString); tkLastRef = tkThisRef; } printLine(g_pFile,"//=============== END REFERENCES ============================="); printLine(g_pFile,COMMENT((char*)-1)); g_refs = refs; if(newrefs) VDELETE(newrefs); } void CloseNamespace(__inout __nullterminated char* szString) { if(strlen(g_szNamespace)) { char* szptr = &szString[0]; if(g_szAsmCodeIndent[0]) g_szAsmCodeIndent[strlen(g_szAsmCodeIndent)-2] = 0; szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s%s ",g_szAsmCodeIndent, UNSCOPE()); szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),COMMENT("// end of namespace %s"),ProperName(g_szNamespace)); printLine(g_pFile,szString); printLine(g_pFile,""); g_szNamespace[0] = 0; } } FILE* OpenOutput(_In_ __nullterminated const WCHAR* wzFileName) { FILE* pfile = NULL; if(g_uCodePage == 0xFFFFFFFF) _wfopen_s(&pfile,wzFileName,W("wb")); else _wfopen_s(&pfile,wzFileName,W("wt")); if(pfile) { if(g_uCodePage == CP_UTF8) fwrite("\357\273\277",3,1,pfile); else if(g_uCodePage == 0xFFFFFFFF) fwrite("\377\376",2,1,pfile); } return pfile; } FILE* OpenOutput(_In_ __nullterminated const char* szFileName) { return OpenOutput(UtfToUnicode(szFileName)); } // // Init PELoader, dump file header info // #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable:21000) // Suppress PREFast warning about overly large function #endif BOOL DumpFile() { BOOL fSuccess = FALSE; static WCHAR wzInputFileName[MAX_FILENAME_LENGTH]; static char szFilenameANSI[MAX_FILENAME_LENGTH*3]; IMetaDataDispenser *pMetaDataDispenser = NULL; const char *pszFilename = g_szInputFile; const DWORD openFlags = ofRead | (g_fProject ? 
0 : ofNoTransform); { if(g_fDumpHTML) { printLine(g_pFile, "<HTML>"); printLine(g_pFile, "<HEAD>"); sprintf_s(szString,SZSTRING_SIZE,"<TITLE> %s - IL DASM</TITLE>",g_szInputFile); printLine(g_pFile, szString); printLine(g_pFile, "</HEAD>"); printLine(g_pFile, "<BODY>"); printLine(g_pFile, "<FONT SIZE=3 FACE=\"Arial\">"); printLine(g_pFile, "<PRE>"); } else if(g_fDumpRTF) { DumpRTFPrefix(g_pFile,TRUE); } DumpPreamble(); } { char* pch = strrchr(g_szInputFile,'.'); if(pch && (!_stricmp(pch+1,"lib") || !_stricmp(pch+1,"obj"))) { DumpMetaInfo(g_wszFullInputFile,g_pszObjFileName,g_pFile); return FALSE; } } if(g_pPELoader) goto DoneInitialization; // skip initialization, it's already done g_pPELoader = new PELoader(); if (g_pPELoader == NULL) { printError(g_pFile,RstrUTF(IDS_E_INITLDR)); goto exit; } memset(wzInputFileName,0,sizeof(WCHAR)*MAX_FILENAME_LENGTH); WszMultiByteToWideChar(CP_UTF8,0,pszFilename,-1,wzInputFileName,MAX_FILENAME_LENGTH); memset(szFilenameANSI,0,MAX_FILENAME_LENGTH*3); WszWideCharToMultiByte(g_uConsoleCP,0,wzInputFileName,-1,szFilenameANSI,MAX_FILENAME_LENGTH*3,NULL,NULL); fSuccess = g_pPELoader->open(wzInputFileName); if (fSuccess == FALSE) { sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_FILEOPEN), pszFilename); printError(g_pFile,szString); SDELETE(g_pPELoader); g_pPELoader = NULL; goto exit; } fSuccess = FALSE; if (g_pPELoader->getCOMHeader(&g_CORHeader) == FALSE) { sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_NOCORHDR), pszFilename); printError(g_pFile,szString); if (g_fDumpHeader) DumpHeader(g_CORHeader,g_pFile); goto exit; } if (VAL16(g_CORHeader->MajorRuntimeVersion) == 1 || VAL16(g_CORHeader->MajorRuntimeVersion) > COR_VERSION_MAJOR) { sprintf_s(szString,SZSTRING_SIZE,"CORHeader->MajorRuntimeVersion = %d",VAL16(g_CORHeader->MajorRuntimeVersion)); printError(g_pFile,szString); printError(g_pFile,RstrUTF(IDS_E_BADCORHDR)); goto exit; } g_tkEntryPoint = VAL32(IMAGE_COR20_HEADER_FIELD(*g_CORHeader, EntryPointToken)); // integration with MetaInfo { if (g_pPELoader->getVAforRVA(VAL32(g_CORHeader->MetaData.VirtualAddress),&g_pMetaData) == FALSE) { printError(g_pFile, RstrUTF(IDS_E_OPENMD)); if (g_fDumpHeader) DumpHeader(g_CORHeader, g_pFile); goto exit; } g_cbMetaData = VAL32(g_CORHeader->MetaData.Size); } if (FAILED(GetMetaDataInternalInterface( (BYTE *)g_pMetaData, g_cbMetaData, openFlags, IID_IMDInternalImport, (LPVOID *)&g_pImport))) { if (g_fDumpHeader) DumpHeader(g_CORHeader, g_pFile); printError(g_pFile, RstrUTF(IDS_E_OPENMD)); goto exit; } TokenSigInit(g_pImport); if (FAILED(MetaDataGetDispenser(CLSID_CorMetaDataDispenser, IID_IMetaDataDispenser, (LPVOID*)&pMetaDataDispenser))) { if (g_fDumpHeader) DumpHeader(g_CORHeader, g_pFile); printError(g_pFile, RstrUTF(IDS_E_OPENMD)); goto exit; } if (FAILED(pMetaDataDispenser->OpenScopeOnMemory(g_pMetaData, g_cbMetaData, openFlags, IID_IMetaDataImport2, (LPUNKNOWN *)&g_pPubImport ))) { if (g_fDumpHeader) DumpHeader(g_CORHeader, g_pFile); printError(g_pFile, RstrUTF(IDS_E_OPENMD)); goto exit; } if((g_uNCA = g_pImport->GetCountWithTokenKind(mdtCustomAttribute))) { g_rchCA = new char[g_uNCA+1]; _ASSERTE(g_rchCA); } EnumClasses(); EnumTypedefs(); DoneInitialization: if(g_uNCA) { _ASSERTE(g_rchCA); memset(g_rchCA,0,g_uNCA+1); } { // Dump the CLR header info if requested. 
printLine(g_pFile,COMMENT((char*)0)); // start multiline comment if (g_fDumpHeader) { DumpHeader(g_CORHeader,g_pFile); DumpHeaderDetails(g_CORHeader,g_pFile); } else DumpVTables(g_CORHeader,g_pFile); if (g_fDumpStats) DumpStatistics(g_CORHeader,g_pFile); if(g_fDumpClassList) PrintClassList(); // MetaInfo integration: if(g_fDumpMetaInfo) DumpMetaInfo(g_wszFullInputFile,NULL,g_pFile); if(g_fDumpSummary) DumpSummary(); printLine(g_pFile,COMMENT((char*)-1)); // end multiline comment if(g_fShowRefs) g_refs = new DynamicArray<TokPair>; if (g_fDumpAsmCode) { g_szNamespace[0] = 0; if(g_tkClassToDump) //g_tkClassToDump is set in EnumClasses { DumpClass(TopEncloser(g_tkClassToDump), VAL32(IMAGE_COR20_HEADER_FIELD(*g_CORHeader, EntryPointToken)),g_pFile,7); //7-dump everything at once CloseNamespace(szString); goto ReportAndExit; } { HENUMInternal hEnumMethod; ULONG ulNumGlobalFunc=0; if (SUCCEEDED(g_pImport->EnumGlobalFunctionsInit(&hEnumMethod))) { ulNumGlobalFunc = g_pImport->EnumGetCount(&hEnumMethod); g_pImport->EnumClose(&hEnumMethod); } } //DumpVtable(g_pFile); DumpMscorlib(g_pFile); if(g_fDumpTypeList) DumpTypelist(g_pFile); DumpManifest(g_pFile); DumpTypedefs(g_pFile); /* First dump the classes w/o members*/ if(g_fForwardDecl && g_NumClasses) { printLine(g_pFile,COMMENT("//")); printLine(g_pFile,COMMENT("// ============== CLASS STRUCTURE DECLARATION ==================")); printLine(g_pFile,COMMENT("//")); for (DWORD i = 0; i < g_NumClasses; i++) { if(g_cl_enclosing[i] == mdTypeDefNil) // nested classes are dumped within enclosing ones { DumpClass(g_cl_list[i], VAL32(IMAGE_COR20_HEADER_FIELD(*g_CORHeader, EntryPointToken)),g_pFile,2); // 2=header+nested classes } } CloseNamespace(szString); printLine(g_pFile,""); printLine(g_pFile,COMMENT("// =============================================================")); printLine(g_pFile,""); } /* Second, dump the global fields and methods */ DumpGlobalFields(); DumpGlobalMethods(VAL32(IMAGE_COR20_HEADER_FIELD(*g_CORHeader, EntryPointToken))); /* Third, dump the classes with members */ if(g_NumClasses) { printLine(g_pFile,""); printLine(g_pFile,COMMENT("// =============== CLASS MEMBERS DECLARATION ===================")); if(g_fForwardDecl) { printLine(g_pFile,COMMENT("// note that class flags, 'extends' and 'implements' clauses")); printLine(g_pFile,COMMENT("// are provided here for information only")); } printLine(g_pFile,""); for (DWORD i = 0; i < g_NumClasses; i++) { if(g_cl_enclosing[i] == mdTypeDefNil) // nested classes are dumped within enclosing ones { DumpClass(g_cl_list[i], VAL32(IMAGE_COR20_HEADER_FIELD(*g_CORHeader, EntryPointToken)),g_pFile,7); //7=everything } } CloseNamespace(szString); printLine(g_pFile,""); printLine(g_pFile,COMMENT("// =============================================================")); printLine(g_pFile,""); } if(g_fShowCA) { if(g_uNCA) _ASSERTE(g_rchCA); for(DWORD i=1; i<= g_uNCA; i++) { if(g_rchCA[i] == 0) DumpCustomAttribute(TokenFromRid(i,mdtCustomAttribute),g_pFile,true); } } // If there were "ldptr", dump the .rdata section with labels if(g_iPtrCount) { //first, sort the pointers int i,j; bool swapped; do { swapped = FALSE; for(i = 1; i < g_iPtrCount; i++) { if((*g_pPtrTags)[i-1] > (*g_pPtrTags)[i]) { j = (*g_pPtrTags)[i-1]; (*g_pPtrTags)[i-1] = (*g_pPtrTags)[i]; (*g_pPtrTags)[i] = j; j = (*g_pPtrSize)[i-1]; (*g_pPtrSize)[i-1] = (*g_pPtrSize)[i]; (*g_pPtrSize)[i] = j; swapped = TRUE; } } } while(swapped); //second, dump data for each ptr as binarray IMAGE_SECTION_HEADER *pSecHdr = NULL; if(g_pPELoader->IsPE32()) 
pSecHdr = IMAGE_FIRST_SECTION(g_pPELoader->ntHeaders32()); else pSecHdr = IMAGE_FIRST_SECTION(g_pPELoader->ntHeaders64()); DWORD dwNumberOfSections; if(g_pPELoader->IsPE32()) dwNumberOfSections = VAL16(g_pPELoader->ntHeaders32()->FileHeader.NumberOfSections); else dwNumberOfSections = VAL16(g_pPELoader->ntHeaders64()->FileHeader.NumberOfSections); DWORD fromPtr,toPtr,limPtr; char* szptr; for(j = 0; j < g_iPtrCount; j++) { BYTE *pb; fromPtr = (*g_pPtrTags)[j]; for (i=0; i < (int)dwNumberOfSections; i++,pSecHdr++) { if((fromPtr >= VAL32(pSecHdr->VirtualAddress))&& (fromPtr < VAL32(pSecHdr->VirtualAddress)+VAL32(pSecHdr->Misc.VirtualSize))) break; } if(i == (int)dwNumberOfSections) { sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_ROGUEPTR), fromPtr); printLine(g_pFile,szString); break; } // OK, now we have the section; what about end of BLOB? const char* szTls = "D_"; if(strcmp((char*)(pSecHdr->Name),".tls")==0) szTls = "T_"; else if(strcmp((char*)(pSecHdr->Name),".text")==0) szTls = "I_"; if(j == g_iPtrCount-1) { toPtr = VAL32(pSecHdr->VirtualAddress)+VAL32(pSecHdr->Misc.VirtualSize); } else { toPtr = (*g_pPtrTags)[j+1]; if(toPtr > VAL32(pSecHdr->VirtualAddress)+VAL32(pSecHdr->Misc.VirtualSize)) { toPtr = VAL32(pSecHdr->VirtualAddress)+VAL32(pSecHdr->Misc.VirtualSize); } } if(toPtr - fromPtr > (*g_pPtrSize)[j]) toPtr = fromPtr + (*g_pPtrSize)[j]; limPtr = toPtr; // at limPtr and after, pad with 0 if(limPtr > VAL32(pSecHdr->VirtualAddress)+VAL32(pSecHdr->SizeOfRawData)) limPtr = VAL32(pSecHdr->VirtualAddress)+VAL32(pSecHdr->SizeOfRawData); PrintBlob: szptr = szString; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s%s ",g_szAsmCodeIndent,KEYWORD(".data")); if(*szTls=='T') szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("tls ")); else if(*szTls=='I') szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),KEYWORD("cil ")); if(fromPtr >= limPtr) { // uninitialized data sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s%8.8X = %s[%d]",szTls,fromPtr,KEYWORD("int8"),toPtr-fromPtr); printLine(g_pFile,szString); } else { // initialized data szptr+=sprintf_s(szptr,SZSTRING_REMAINING_SIZE(szptr),"%s%8.8X = %s (",szTls,fromPtr,KEYWORD("bytearray")); printLine(g_pFile,szString); szptr = szString; szptr+=sprintf_s(szptr,SZSTRING_SIZE,"%s ",g_szAsmCodeIndent); pb = g_pPELoader->base() + VAL32(pSecHdr->PointerToRawData) + fromPtr - VAL32(pSecHdr->VirtualAddress); // now fromPtr is the beginning of the BLOB, and toPtr is [exclusive] end of it DumpHexbytes(szptr, pb, fromPtr, toPtr, limPtr); } // to preserve alignment, dump filler if any if(limPtr == toPtr) // don't need filler if it's the last item in section { if((j < g_iPtrCount-1)&&(toPtr < (DWORD)((*g_pPtrTags)[j+1]))) { DWORD align; DWORD stptr = (DWORD)(*g_pPtrTags)[j+1]; for(align = 1; (align & stptr)==0; align = align << 1); align -= 1; if(toPtr & align) { fromPtr = toPtr; toPtr = (toPtr + align)&~align; goto PrintBlob; } } } } } ReportAndExit: printLine(g_pFile,COMMENT(RstrUTF(IDS_E_DASMOK))); fSuccess = TRUE; } fSuccess = TRUE; #ifndef TARGET_UNIX if(g_pFile) // dump .RES file (if any), if not to console { WCHAR wzResFileName[2048], *pwc; memset(wzResFileName,0,sizeof(wzResFileName)); WszMultiByteToWideChar(CP_UTF8,0,g_szOutputFile,-1,wzResFileName,2048); pwc = wcsrchr(wzResFileName,L'.'); if(pwc == NULL) pwc = &wzResFileName[wcslen(wzResFileName)]; wcscpy_s(pwc, 2048 - (pwc - wzResFileName), L".res"); DWORD ret = DumpResourceToFile(wzResFileName); switch(ret) { case 0: szString[0] = 0; break; case 1: 
sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_W_CREATEDW32RES)/*"// WARNING: Created Win32 resource file %ls"*/, UnicodeToUtf(wzResFileName)); break; case 0xDFFFFFFF: sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_CORRUPTW32RES)/*"// ERROR: Corrupt Win32 resources"*/); break; case 0xEFFFFFFF: sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_CANTOPENW32RES)/*"// ERROR: Unable to open file %ls"*/, UnicodeToUtf(wzResFileName)); break; case 0xFFFFFFFF: sprintf_s(szString,SZSTRING_SIZE,RstrUTF(IDS_E_CANTACCESSW32RES)/*"// ERROR: Unable to access Win32 resources"*/); break; } if(szString[0]) { if(ret == 1) printLine(g_pFile,COMMENT(szString)); else printError(g_pFile,szString); } } #endif if(g_fShowRefs) DumpRefs(TRUE); if(g_fDumpHTML) { printLine(g_pFile, "</PRE>"); printLine(g_pFile, "</BODY>"); printLine(g_pFile, "</HTML>"); } else if(g_fDumpRTF) { DumpRTFPostfix(g_pFile); } if(g_pFile) { fclose(g_pFile); g_pFile = NULL; } } exit: if (pMetaDataDispenser) pMetaDataDispenser->Release(); return fSuccess; } #ifdef _PREFAST_ #pragma warning(pop) #endif #ifdef _MSC_VER #pragma warning(default : 4640) #endif
-1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
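As a side note on the description above (this example is not part of the PR): the `for (i = expression...` case it mentions is a loop whose induction variable starts from a computed value rather than a constant or a plain local. A minimal sketch of that shape, written in C++ purely for illustration; in managed code the array access in the body would carry the bounds check that loop cloning guards, which C++ does not have.

// Illustrative only: the init expression "start * 2 + 1" is neither a constant nor a
// plain local, which is the loop shape the description says is now also cloned.
// The function name and parameters are invented for this sketch.
int SumFromComputedStart(const int* data, int length, int start)
{
    int sum = 0;
    for (int i = start * 2 + 1; i < length; i++)
    {
        sum += data[i];
    }
    return sum;
}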
./src/native/corehost/fxr_resolver.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <pal.h> #include "fxr_resolver.h" #include <fx_ver.h> #include <trace.h> #include <utils.h> namespace { bool get_latest_fxr(pal::string_t fxr_root, pal::string_t* out_fxr_path) { trace::info(_X("Reading fx resolver directory=[%s]"), fxr_root.c_str()); std::vector<pal::string_t> list; pal::readdir_onlydirectories(fxr_root, &list); fx_ver_t max_ver; for (const auto& dir : list) { trace::info(_X("Considering fxr version=[%s]..."), dir.c_str()); pal::string_t ver = get_filename(dir); fx_ver_t fx_ver; if (fx_ver_t::parse(ver, &fx_ver, /* parse_only_production */ false)) { max_ver = std::max(max_ver, fx_ver); } } if (max_ver == fx_ver_t()) { trace::error(_X("A fatal error occurred, the folder [%s] does not contain any version-numbered child folders"), fxr_root.c_str()); return false; } pal::string_t max_ver_str = max_ver.as_str(); append_path(&fxr_root, max_ver_str.c_str()); trace::info(_X("Detected latest fxr version=[%s]..."), fxr_root.c_str()); if (library_exists_in_dir(fxr_root, LIBFXR_NAME, out_fxr_path)) { trace::info(_X("Resolved fxr [%s]..."), out_fxr_path->c_str()); return true; } trace::error(_X("A fatal error occurred, the required library %s could not be found in [%s]"), LIBFXR_NAME, fxr_root.c_str()); return false; } } bool fxr_resolver::try_get_path(const pal::string_t& root_path, pal::string_t* out_dotnet_root, pal::string_t* out_fxr_path) { #if defined(FEATURE_APPHOST) || defined(FEATURE_LIBHOST) // For apphost and libhost, root_path is expected to be a directory. // For libhost, it may be empty if app-local search is not desired (e.g. com/ijw/winrt hosts, nethost when no assembly path is specified) // If a hostfxr exists in root_path, then assume self-contained. if (root_path.length() > 0 && library_exists_in_dir(root_path, LIBFXR_NAME, out_fxr_path)) { trace::info(_X("Resolved fxr [%s]..."), out_fxr_path->c_str()); out_dotnet_root->assign(root_path); return true; } // For framework-dependent apps, use DOTNET_ROOT_<ARCH> pal::string_t default_install_location; pal::string_t dotnet_root_env_var_name; if (get_dotnet_root_from_env(&dotnet_root_env_var_name, out_dotnet_root)) { trace::info(_X("Using environment variable %s=[%s] as runtime location."), dotnet_root_env_var_name.c_str(), out_dotnet_root->c_str()); } else { if (pal::get_dotnet_self_registered_dir(&default_install_location) || pal::get_default_installation_dir(&default_install_location)) { trace::info(_X("Using global installation location [%s] as runtime location."), default_install_location.c_str()); out_dotnet_root->assign(default_install_location); } else { trace::error(_X("A fatal error occurred, the default install location cannot be obtained.")); return false; } } pal::string_t fxr_dir = *out_dotnet_root; append_path(&fxr_dir, _X("host")); append_path(&fxr_dir, _X("fxr")); if (!pal::directory_exists(fxr_dir)) { if (default_install_location.empty()) { pal::get_dotnet_self_registered_dir(&default_install_location); } if (default_install_location.empty()) { pal::get_default_installation_dir(&default_install_location); } pal::string_t self_registered_config_location = pal::get_dotnet_self_registered_config_location(); pal::string_t self_registered_message = _X(" or register the runtime location in [") + self_registered_config_location + _X("]"); trace::error(_X("A fatal error occurred. 
The required library %s could not be found.\n" "If this is a self-contained application, that library should exist in [%s].\n" "If this is a framework-dependent application, install the runtime in the global location [%s] or use the %s environment variable to specify the runtime location%s."), LIBFXR_NAME, root_path.c_str(), default_install_location.c_str(), dotnet_root_env_var_name.c_str(), self_registered_message.c_str()); trace::error(_X("")); trace::error(_X("The .NET runtime can be found at:")); trace::error(_X(" - %s&apphost_version=%s"), get_download_url().c_str(), _STRINGIFY(COMMON_HOST_PKG_VER)); return false; } return get_latest_fxr(std::move(fxr_dir), out_fxr_path); #else // !FEATURE_APPHOST && !FEATURE_LIBHOST // For non-apphost and non-libhost (i.e. muxer), root_path is expected to be the full path to the host pal::string_t host_dir; host_dir.assign(get_directory(root_path)); out_dotnet_root->assign(host_dir); return fxr_resolver::try_get_path_from_dotnet_root(*out_dotnet_root, out_fxr_path); #endif // !FEATURE_APPHOST && !FEATURE_LIBHOST } bool fxr_resolver::try_get_path_from_dotnet_root(const pal::string_t& dotnet_root, pal::string_t* out_fxr_path) { pal::string_t fxr_dir = dotnet_root; append_path(&fxr_dir, _X("host")); append_path(&fxr_dir, _X("fxr")); if (!pal::directory_exists(fxr_dir)) { trace::error(_X("A fatal error occurred. The folder [%s] does not exist"), fxr_dir.c_str()); return false; } return get_latest_fxr(std::move(fxr_dir), out_fxr_path); } bool fxr_resolver::try_get_existing_fxr(pal::dll_t* out_fxr, pal::string_t* out_fxr_path) { if (!pal::get_loaded_library(LIBFXR_NAME, "hostfxr_main", out_fxr, out_fxr_path)) return false; trace::verbose(_X("Found previously loaded library %s [%s]."), LIBFXR_NAME, out_fxr_path->c_str()); return true; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <pal.h> #include "fxr_resolver.h" #include <fx_ver.h> #include <trace.h> #include <utils.h> namespace { bool get_latest_fxr(pal::string_t fxr_root, pal::string_t* out_fxr_path) { trace::info(_X("Reading fx resolver directory=[%s]"), fxr_root.c_str()); std::vector<pal::string_t> list; pal::readdir_onlydirectories(fxr_root, &list); fx_ver_t max_ver; for (const auto& dir : list) { trace::info(_X("Considering fxr version=[%s]..."), dir.c_str()); pal::string_t ver = get_filename(dir); fx_ver_t fx_ver; if (fx_ver_t::parse(ver, &fx_ver, /* parse_only_production */ false)) { max_ver = std::max(max_ver, fx_ver); } } if (max_ver == fx_ver_t()) { trace::error(_X("A fatal error occurred, the folder [%s] does not contain any version-numbered child folders"), fxr_root.c_str()); return false; } pal::string_t max_ver_str = max_ver.as_str(); append_path(&fxr_root, max_ver_str.c_str()); trace::info(_X("Detected latest fxr version=[%s]..."), fxr_root.c_str()); if (library_exists_in_dir(fxr_root, LIBFXR_NAME, out_fxr_path)) { trace::info(_X("Resolved fxr [%s]..."), out_fxr_path->c_str()); return true; } trace::error(_X("A fatal error occurred, the required library %s could not be found in [%s]"), LIBFXR_NAME, fxr_root.c_str()); return false; } } bool fxr_resolver::try_get_path(const pal::string_t& root_path, pal::string_t* out_dotnet_root, pal::string_t* out_fxr_path) { #if defined(FEATURE_APPHOST) || defined(FEATURE_LIBHOST) // For apphost and libhost, root_path is expected to be a directory. // For libhost, it may be empty if app-local search is not desired (e.g. com/ijw/winrt hosts, nethost when no assembly path is specified) // If a hostfxr exists in root_path, then assume self-contained. if (root_path.length() > 0 && library_exists_in_dir(root_path, LIBFXR_NAME, out_fxr_path)) { trace::info(_X("Resolved fxr [%s]..."), out_fxr_path->c_str()); out_dotnet_root->assign(root_path); return true; } // For framework-dependent apps, use DOTNET_ROOT_<ARCH> pal::string_t default_install_location; pal::string_t dotnet_root_env_var_name; if (get_dotnet_root_from_env(&dotnet_root_env_var_name, out_dotnet_root)) { trace::info(_X("Using environment variable %s=[%s] as runtime location."), dotnet_root_env_var_name.c_str(), out_dotnet_root->c_str()); } else { if (pal::get_dotnet_self_registered_dir(&default_install_location) || pal::get_default_installation_dir(&default_install_location)) { trace::info(_X("Using global installation location [%s] as runtime location."), default_install_location.c_str()); out_dotnet_root->assign(default_install_location); } else { trace::error(_X("A fatal error occurred, the default install location cannot be obtained.")); return false; } } pal::string_t fxr_dir = *out_dotnet_root; append_path(&fxr_dir, _X("host")); append_path(&fxr_dir, _X("fxr")); if (!pal::directory_exists(fxr_dir)) { if (default_install_location.empty()) { pal::get_dotnet_self_registered_dir(&default_install_location); } if (default_install_location.empty()) { pal::get_default_installation_dir(&default_install_location); } pal::string_t self_registered_config_location = pal::get_dotnet_self_registered_config_location(); pal::string_t self_registered_message = _X(" or register the runtime location in [") + self_registered_config_location + _X("]"); trace::error(_X("A fatal error occurred. 
The required library %s could not be found.\n" "If this is a self-contained application, that library should exist in [%s].\n" "If this is a framework-dependent application, install the runtime in the global location [%s] or use the %s environment variable to specify the runtime location%s."), LIBFXR_NAME, root_path.c_str(), default_install_location.c_str(), dotnet_root_env_var_name.c_str(), self_registered_message.c_str()); trace::error(_X("")); trace::error(_X("The .NET runtime can be found at:")); trace::error(_X(" - %s&apphost_version=%s"), get_download_url().c_str(), _STRINGIFY(COMMON_HOST_PKG_VER)); return false; } return get_latest_fxr(std::move(fxr_dir), out_fxr_path); #else // !FEATURE_APPHOST && !FEATURE_LIBHOST // For non-apphost and non-libhost (i.e. muxer), root_path is expected to be the full path to the host pal::string_t host_dir; host_dir.assign(get_directory(root_path)); out_dotnet_root->assign(host_dir); return fxr_resolver::try_get_path_from_dotnet_root(*out_dotnet_root, out_fxr_path); #endif // !FEATURE_APPHOST && !FEATURE_LIBHOST } bool fxr_resolver::try_get_path_from_dotnet_root(const pal::string_t& dotnet_root, pal::string_t* out_fxr_path) { pal::string_t fxr_dir = dotnet_root; append_path(&fxr_dir, _X("host")); append_path(&fxr_dir, _X("fxr")); if (!pal::directory_exists(fxr_dir)) { trace::error(_X("A fatal error occurred. The folder [%s] does not exist"), fxr_dir.c_str()); return false; } return get_latest_fxr(std::move(fxr_dir), out_fxr_path); } bool fxr_resolver::try_get_existing_fxr(pal::dll_t* out_fxr, pal::string_t* out_fxr_path) { if (!pal::get_loaded_library(LIBFXR_NAME, "hostfxr_main", out_fxr, out_fxr_path)) return false; trace::verbose(_X("Found previously loaded library %s [%s]."), LIBFXR_NAME, out_fxr_path->c_str()); return true; }
-1
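A rough sketch of the directory-scanning idea in get_latest_fxr above, using only the standard library: fx_ver_t, pal::readdir_onlydirectories and the tracing calls are replaced with std::filesystem and a simplistic major.minor.patch parser, and prerelease handling is ignored, so this illustrates the selection logic rather than the real resolver. All names here are invented for the sketch.

#include <cstdio>
#include <filesystem>
#include <string>
#include <tuple>

// Stand-in for fx_ver_t: parse "major.minor.patch" and compare as a tuple.
static bool parse_ver(const std::string& s, std::tuple<int, int, int>* out)
{
    int maj = 0, min = 0, pat = 0;
    if (std::sscanf(s.c_str(), "%d.%d.%d", &maj, &min, &pat) != 3)
        return false;
    *out = { maj, min, pat };
    return true;
}

// Walk a host/fxr-style directory and keep the largest version-named child,
// mirroring the shape of get_latest_fxr above.
static std::string pick_latest_dir(const std::string& fxr_root)
{
    std::tuple<int, int, int> best{ -1, -1, -1 };
    std::string best_dir;
    for (const auto& entry : std::filesystem::directory_iterator(fxr_root))
    {
        if (!entry.is_directory())
            continue;
        std::tuple<int, int, int> ver;
        if (parse_ver(entry.path().filename().string(), &ver) && ver > best)
        {
            best = ver;
            best_dir = entry.path().string();
        }
    }
    return best_dir; // empty if no version-numbered child folder was found
}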
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/coreclr/nativeaot/Runtime/AsmOffsets.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // This file is used by AsmOffsets.cpp to validate that our // assembly-code offsets always match their C++ counterparts. // You must #define PLAT_ASM_OFFSET and PLAT_ASM_SIZEOF before you #include this file #ifdef HOST_64BIT #define ASM_OFFSET(offset32, offset64, cls, member) PLAT_ASM_OFFSET(offset64, cls, member) #define ASM_SIZEOF(sizeof32, sizeof64, cls ) PLAT_ASM_SIZEOF(sizeof64, cls) #define ASM_CONST(const32, const64, expr) PLAT_ASM_CONST(const64, expr) #else #define ASM_OFFSET(offset32, offset64, cls, member) PLAT_ASM_OFFSET(offset32, cls, member) #define ASM_SIZEOF(sizeof32, sizeof64, cls ) PLAT_ASM_SIZEOF(sizeof32, cls) #define ASM_CONST(const32, const64, expr) PLAT_ASM_CONST(const32, expr) #endif // NOTE: the values MUST be in hex notation WITHOUT the 0x prefix // 32-bit,64-bit, constant symbol ASM_CONST( 400, 800, CLUMP_SIZE) ASM_CONST( a, b, LOG2_CLUMP_SIZE) // 32-bit,64-bit, class, member ASM_OFFSET( 0, 0, Object, m_pEEType) ASM_OFFSET( 4, 8, Array, m_Length) ASM_OFFSET( 4, 8, String, m_Length) ASM_OFFSET( 8, C, String, m_FirstChar) ASM_CONST( 2, 2, STRING_COMPONENT_SIZE) ASM_CONST( E, 16, STRING_BASE_SIZE) ASM_CONST(3FFFFFDF,3FFFFFDF,MAX_STRING_LENGTH) ASM_OFFSET( 0, 0, MethodTable, m_usComponentSize) ASM_OFFSET( 2, 2, MethodTable, m_usFlags) ASM_OFFSET( 4, 4, MethodTable, m_uBaseSize) ASM_OFFSET( 14, 18, MethodTable, m_VTable) ASM_OFFSET( 0, 0, Thread, m_rgbAllocContextBuffer) ASM_OFFSET( 28, 38, Thread, m_ThreadStateFlags) ASM_OFFSET( 2c, 40, Thread, m_pTransitionFrame) ASM_OFFSET( 30, 48, Thread, m_pHackPInvokeTunnel) ASM_OFFSET( 40, 68, Thread, m_ppvHijackedReturnAddressLocation) ASM_OFFSET( 44, 70, Thread, m_pvHijackedReturnAddress) #ifdef HOST_64BIT ASM_OFFSET( 0, 78, Thread, m_uHijackedReturnValueFlags) #endif ASM_OFFSET( 48, 80, Thread, m_pExInfoStackHead) ASM_OFFSET( 4c, 88, Thread, m_threadAbortException) ASM_OFFSET( 50, 90, Thread, m_pThreadLocalModuleStatics) ASM_OFFSET( 54, 98, Thread, m_numThreadLocalModuleStatics) ASM_SIZEOF( 14, 20, EHEnum) ASM_OFFSET( 0, 0, gc_alloc_context, alloc_ptr) ASM_OFFSET( 4, 8, gc_alloc_context, alloc_limit) #ifdef FEATURE_CACHED_INTERFACE_DISPATCH ASM_OFFSET( 4, 8, InterfaceDispatchCell, m_pCache) #ifndef HOST_64BIT ASM_OFFSET( 8, 0, InterfaceDispatchCache, m_pCell) #endif ASM_OFFSET( 10, 20, InterfaceDispatchCache, m_rgEntries) ASM_SIZEOF( 8, 10, InterfaceDispatchCacheEntry) #endif #ifdef FEATURE_DYNAMIC_CODE ASM_OFFSET( 0, 0, CallDescrData, pSrc) ASM_OFFSET( 4, 8, CallDescrData, numStackSlots) ASM_OFFSET( 8, C, CallDescrData, fpReturnSize) ASM_OFFSET( C, 10, CallDescrData, pArgumentRegisters) ASM_OFFSET( 10, 18, CallDescrData, pFloatArgumentRegisters) ASM_OFFSET( 14, 20, CallDescrData, pTarget) ASM_OFFSET( 18, 28, CallDescrData, pReturnBuffer) #endif // Undefine macros that are only used in this header for convenience. #undef ASM_OFFSET #undef ASM_SIZEOF #undef ASM_CONST // Define platform specific offsets #include "AsmOffsetsCpu.h" //#define USE_COMPILE_TIME_CONSTANT_FINDER // Uncomment this line to use the constant finder #if defined(__cplusplus) && defined(USE_COMPILE_TIME_CONSTANT_FINDER) // This class causes the compiler to emit an error with the constant we're interested in // in the error message. This is useful if a size or offset changes. 
To use, comment out // the compile-time assert that is firing, enable the constant finder, add the appropriate // constant to find to BogusFunction(), and build. // // Here's a sample compiler error: // In file included from corert/src/Native/Runtime/AsmOffsetsVerify.cpp:38: // corert/src/Native/Runtime/Full/../AsmOffsets.h:117:61: error: calling a private constructor of class // 'AsmOffsets::FindCompileTimeConstant<25>' // FindCompileTimeConstant<offsetof(ExInfo, m_passNumber)> bogus_variable; // ^ // corert/src/Native/Runtime/Full/../AsmOffsets.h:111:5: note: declared private here // FindCompileTimeConstant(); // ^ template<size_t N> class FindCompileTimeConstant { private: FindCompileTimeConstant(); }; void BogusFunction() { // Sample usage to generate the error FindCompileTimeConstant<sizeof(ExInfo)> bogus_variable; FindCompileTimeConstant<offsetof(ExInfo, m_notifyDebuggerSP)> bogus_variable2; FindCompileTimeConstant<sizeof(StackFrameIterator)> bogus_variable3; FindCompileTimeConstant<sizeof(PAL_LIMITED_CONTEXT)> bogus_variable4; FindCompileTimeConstant<offsetof(PAL_LIMITED_CONTEXT, IP)> bogus_variable5; } #endif // defined(__cplusplus) && defined(USE_COMPILE_TIME_CONSTANT_FINDER)
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // This file is used by AsmOffsets.cpp to validate that our // assembly-code offsets always match their C++ counterparts. // You must #define PLAT_ASM_OFFSET and PLAT_ASM_SIZEOF before you #include this file #ifdef HOST_64BIT #define ASM_OFFSET(offset32, offset64, cls, member) PLAT_ASM_OFFSET(offset64, cls, member) #define ASM_SIZEOF(sizeof32, sizeof64, cls ) PLAT_ASM_SIZEOF(sizeof64, cls) #define ASM_CONST(const32, const64, expr) PLAT_ASM_CONST(const64, expr) #else #define ASM_OFFSET(offset32, offset64, cls, member) PLAT_ASM_OFFSET(offset32, cls, member) #define ASM_SIZEOF(sizeof32, sizeof64, cls ) PLAT_ASM_SIZEOF(sizeof32, cls) #define ASM_CONST(const32, const64, expr) PLAT_ASM_CONST(const32, expr) #endif // NOTE: the values MUST be in hex notation WITHOUT the 0x prefix // 32-bit,64-bit, constant symbol ASM_CONST( 400, 800, CLUMP_SIZE) ASM_CONST( a, b, LOG2_CLUMP_SIZE) // 32-bit,64-bit, class, member ASM_OFFSET( 0, 0, Object, m_pEEType) ASM_OFFSET( 4, 8, Array, m_Length) ASM_OFFSET( 4, 8, String, m_Length) ASM_OFFSET( 8, C, String, m_FirstChar) ASM_CONST( 2, 2, STRING_COMPONENT_SIZE) ASM_CONST( E, 16, STRING_BASE_SIZE) ASM_CONST(3FFFFFDF,3FFFFFDF,MAX_STRING_LENGTH) ASM_OFFSET( 0, 0, MethodTable, m_usComponentSize) ASM_OFFSET( 2, 2, MethodTable, m_usFlags) ASM_OFFSET( 4, 4, MethodTable, m_uBaseSize) ASM_OFFSET( 14, 18, MethodTable, m_VTable) ASM_OFFSET( 0, 0, Thread, m_rgbAllocContextBuffer) ASM_OFFSET( 28, 38, Thread, m_ThreadStateFlags) ASM_OFFSET( 2c, 40, Thread, m_pTransitionFrame) ASM_OFFSET( 30, 48, Thread, m_pHackPInvokeTunnel) ASM_OFFSET( 40, 68, Thread, m_ppvHijackedReturnAddressLocation) ASM_OFFSET( 44, 70, Thread, m_pvHijackedReturnAddress) #ifdef HOST_64BIT ASM_OFFSET( 0, 78, Thread, m_uHijackedReturnValueFlags) #endif ASM_OFFSET( 48, 80, Thread, m_pExInfoStackHead) ASM_OFFSET( 4c, 88, Thread, m_threadAbortException) ASM_OFFSET( 50, 90, Thread, m_pThreadLocalModuleStatics) ASM_OFFSET( 54, 98, Thread, m_numThreadLocalModuleStatics) ASM_SIZEOF( 14, 20, EHEnum) ASM_OFFSET( 0, 0, gc_alloc_context, alloc_ptr) ASM_OFFSET( 4, 8, gc_alloc_context, alloc_limit) #ifdef FEATURE_CACHED_INTERFACE_DISPATCH ASM_OFFSET( 4, 8, InterfaceDispatchCell, m_pCache) #ifndef HOST_64BIT ASM_OFFSET( 8, 0, InterfaceDispatchCache, m_pCell) #endif ASM_OFFSET( 10, 20, InterfaceDispatchCache, m_rgEntries) ASM_SIZEOF( 8, 10, InterfaceDispatchCacheEntry) #endif #ifdef FEATURE_DYNAMIC_CODE ASM_OFFSET( 0, 0, CallDescrData, pSrc) ASM_OFFSET( 4, 8, CallDescrData, numStackSlots) ASM_OFFSET( 8, C, CallDescrData, fpReturnSize) ASM_OFFSET( C, 10, CallDescrData, pArgumentRegisters) ASM_OFFSET( 10, 18, CallDescrData, pFloatArgumentRegisters) ASM_OFFSET( 14, 20, CallDescrData, pTarget) ASM_OFFSET( 18, 28, CallDescrData, pReturnBuffer) #endif // Undefine macros that are only used in this header for convenience. #undef ASM_OFFSET #undef ASM_SIZEOF #undef ASM_CONST // Define platform specific offsets #include "AsmOffsetsCpu.h" //#define USE_COMPILE_TIME_CONSTANT_FINDER // Uncomment this line to use the constant finder #if defined(__cplusplus) && defined(USE_COMPILE_TIME_CONSTANT_FINDER) // This class causes the compiler to emit an error with the constant we're interested in // in the error message. This is useful if a size or offset changes. 
To use, comment out // the compile-time assert that is firing, enable the constant finder, add the appropriate // constant to find to BogusFunction(), and build. // // Here's a sample compiler error: // In file included from corert/src/Native/Runtime/AsmOffsetsVerify.cpp:38: // corert/src/Native/Runtime/Full/../AsmOffsets.h:117:61: error: calling a private constructor of class // 'AsmOffsets::FindCompileTimeConstant<25>' // FindCompileTimeConstant<offsetof(ExInfo, m_passNumber)> bogus_variable; // ^ // corert/src/Native/Runtime/Full/../AsmOffsets.h:111:5: note: declared private here // FindCompileTimeConstant(); // ^ template<size_t N> class FindCompileTimeConstant { private: FindCompileTimeConstant(); }; void BogusFunction() { // Sample usage to generate the error FindCompileTimeConstant<sizeof(ExInfo)> bogus_variable; FindCompileTimeConstant<offsetof(ExInfo, m_notifyDebuggerSP)> bogus_variable2; FindCompileTimeConstant<sizeof(StackFrameIterator)> bogus_variable3; FindCompileTimeConstant<sizeof(PAL_LIMITED_CONTEXT)> bogus_variable4; FindCompileTimeConstant<offsetof(PAL_LIMITED_CONTEXT, IP)> bogus_variable5; } #endif // defined(__cplusplus) && defined(USE_COMPILE_TIME_CONSTANT_FINDER)
-1
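The header above relies on AsmOffsets.cpp and the PLAT_ASM_OFFSET machinery to verify the hard-coded values; as a minimal sketch of that verification idea (not the real implementation), a static_assert over offsetof can catch layout drift at compile time. SampleObject and its fields are invented for illustration and assume an 8-byte double, as on mainstream ABIs.

#include <cstddef>

// Invented type standing in for a runtime object whose layout is mirrored in assembly.
struct SampleObject
{
    double m_header;   // 8 bytes on mainstream ABIs, so the next field lands at offset 8
    int    m_Length;
};

// Compile-time check that a hard-coded offset still matches the C++ layout.
#define CHECK_ASM_OFFSET(expected, cls, member) \
    static_assert(offsetof(cls, member) == (expected), \
                  "assembly offset for " #cls "::" #member " is out of date")

CHECK_ASM_OFFSET(0, SampleObject, m_header);
CHECK_ASM_OFFSET(8, SampleObject, m_Length);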
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/coreclr/ildasm/dynamicarray.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef DYNAMICARRAY_H #define DYNAMICARRAY_H #include "memory.h" const int START_SIZE = 24 ; const int MIN_SIZE = 8 ; template <class T> class DynamicArray { public: DynamicArray(int iSize = START_SIZE) ; ~DynamicArray() ; T& operator[](int i) ; bool Error() ; private: T* m_pArray ; int m_iMemSize ; int m_iArraySize ; bool m_bError ; }; /************************************************************************ * * * Default constructor. User has option to pass in the size of the * * initial array. * * * ************************************************************************/ template<class T> DynamicArray<T>::DynamicArray(int iSize) { if( iSize < MIN_SIZE ) { iSize = MIN_SIZE ; } m_pArray = new T[iSize] ; m_iMemSize = iSize ; m_iArraySize = 0 ; m_bError = false ; } /************************************************************************ * * * Destructor. All it really has to do is delete the array. * * * ************************************************************************/ template<class T> DynamicArray<T>::~DynamicArray() { if( m_pArray ) { delete [] m_pArray ; } } /************************************************************************ * * * operator [] to work on the left or right side of the equation. * * * ************************************************************************/ template<class T> T& DynamicArray<T>::operator [](int iIndex) { if( iIndex < 0 ) { // Error, set error value to true and return the first element of the array m_bError = true ; return m_pArray[0] ; } else if ( iIndex >= m_iArraySize ) { if( iIndex >= m_iMemSize ) { int iNewSize ; if( iIndex >= m_iMemSize * 2 ) { iNewSize = iIndex + 1 ; } else { iNewSize = m_iMemSize * 2 ; } // We need to allocate more memory T* pTmp = new T[iNewSize] ; memcpy(pTmp, m_pArray, m_iMemSize * sizeof(T)) ; delete [] m_pArray ; m_pArray = pTmp ; // Record the new memory size m_iMemSize = iNewSize ; } //ZeroMemory(&m_pArray[iIndex], sizeof(T)) ; ++m_iArraySize ; } return m_pArray[iIndex] ; } template<class T> bool DynamicArray<T>::Error() { return m_bError ; } #endif
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef DYNAMICARRAY_H #define DYNAMICARRAY_H #include "memory.h" const int START_SIZE = 24 ; const int MIN_SIZE = 8 ; template <class T> class DynamicArray { public: DynamicArray(int iSize = START_SIZE) ; ~DynamicArray() ; T& operator[](int i) ; bool Error() ; private: T* m_pArray ; int m_iMemSize ; int m_iArraySize ; bool m_bError ; }; /************************************************************************ * * * Default constructor. User has option to pass in the size of the * * initial array. * * * ************************************************************************/ template<class T> DynamicArray<T>::DynamicArray(int iSize) { if( iSize < MIN_SIZE ) { iSize = MIN_SIZE ; } m_pArray = new T[iSize] ; m_iMemSize = iSize ; m_iArraySize = 0 ; m_bError = false ; } /************************************************************************ * * * Destructor. All it really has to do is delete the array. * * * ************************************************************************/ template<class T> DynamicArray<T>::~DynamicArray() { if( m_pArray ) { delete [] m_pArray ; } } /************************************************************************ * * * operator [] to work on the left or right side of the equation. * * * ************************************************************************/ template<class T> T& DynamicArray<T>::operator [](int iIndex) { if( iIndex < 0 ) { // Error, set error value to true and return the first element of the array m_bError = true ; return m_pArray[0] ; } else if ( iIndex >= m_iArraySize ) { if( iIndex >= m_iMemSize ) { int iNewSize ; if( iIndex >= m_iMemSize * 2 ) { iNewSize = iIndex + 1 ; } else { iNewSize = m_iMemSize * 2 ; } // We need to allocate more memory T* pTmp = new T[iNewSize] ; memcpy(pTmp, m_pArray, m_iMemSize * sizeof(T)) ; delete [] m_pArray ; m_pArray = pTmp ; // Record the new memory size m_iMemSize = iNewSize ; } //ZeroMemory(&m_pArray[iIndex], sizeof(T)) ; ++m_iArraySize ; } return m_pArray[iIndex] ; } template<class T> bool DynamicArray<T>::Error() { return m_bError ; } #endif
-1
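A minimal usage sketch for the DynamicArray<T> template above, assuming the header is included; indexing past the current end is what triggers the doubling (or jump-to-index) growth inside operator[], so no explicit resize call is needed.

#include <cstdio>
// #include "dynamicarray.h"   // the header shown above

int main()
{
    DynamicArray<int> squares(4);      // below MIN_SIZE, so the constructor bumps it to 8
    for (int i = 0; i < 100; i++)
        squares[i] = i * i;            // operator[] grows the backing store on demand

    std::printf("%d\n", squares[99]);  // prints 9801
    std::printf("%s\n", squares.Error() ? "error" : "ok");
    return 0;
}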
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/coreclr/pal/src/libunwind/include/x86/jmpbuf.h
/* libunwind - a platform-independent unwind library Copyright (C) 2004 Hewlett-Packard Co Contributed by David Mosberger-Tang <[email protected]> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /* Use glibc's jump-buffer indices; NPTL peeks at SP: */ #define JB_SP 4 #define JB_RP 5 #define JB_MASK_SAVED 6 #define JB_MASK 7
/* libunwind - a platform-independent unwind library Copyright (C) 2004 Hewlett-Packard Co Contributed by David Mosberger-Tang <[email protected]> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /* Use glibc's jump-buffer indices; NPTL peeks at SP: */ #define JB_SP 4 #define JB_RP 5 #define JB_MASK_SAVED 6 #define JB_MASK 7
-1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/coreclr/jit/phase.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*****************************************************************************/ #ifndef _PHASE_H_ #define _PHASE_H_ // A phase encapsulates a part of the compilation pipeline for a method. // class Phase { // Observations made before a phase runs that should still // be true afterwards,if the phase status is MODIFIED_NOTHING. class Observations { public: Observations(Compiler* compiler); void Check(PhaseStatus status); private: #ifdef DEBUG Compiler* m_compiler; unsigned m_fgBBcount; unsigned m_fgBBNumMax; unsigned m_compHndBBtabCount; unsigned m_lvaCount; unsigned m_compGenTreeID; unsigned m_compStatementID; unsigned m_compBasicBlockID; #endif // DEBUG }; public: virtual void Run(); protected: Phase(Compiler* _compiler, Phases _phase) : comp(_compiler), m_name(nullptr), m_phase(_phase) { m_name = PhaseNames[_phase]; } virtual void PrePhase(); virtual PhaseStatus DoPhase() = 0; virtual void PostPhase(PhaseStatus status); Compiler* comp; const char* m_name; Phases m_phase; }; // A phase that accepts a lambda for the actions done by the phase. // template <typename A> class ActionPhase final : public Phase { public: ActionPhase(Compiler* _compiler, Phases _phase, A _action) : Phase(_compiler, _phase), action(_action) { } protected: virtual PhaseStatus DoPhase() override { action(); return PhaseStatus::MODIFIED_EVERYTHING; } private: A action; }; // Wrappers for using ActionPhase // template <typename A> void DoPhase(Compiler* _compiler, Phases _phase, A _action) { ActionPhase<A> phase(_compiler, _phase, _action); phase.Run(); } // A simple phase that just invokes a method on the compiler instance // class CompilerPhase final : public Phase { public: CompilerPhase(Compiler* _compiler, Phases _phase, void (Compiler::*_action)()) : Phase(_compiler, _phase), action(_action) { } protected: virtual PhaseStatus DoPhase() override { (comp->*action)(); return PhaseStatus::MODIFIED_EVERYTHING; } private: void (Compiler::*action)(); }; // Wrapper for using CompilePhase // inline void DoPhase(Compiler* _compiler, Phases _phase, void (Compiler::*_action)()) { CompilerPhase phase(_compiler, _phase, _action); phase.Run(); } // A simple phase that just invokes a method on the compiler instance // where the method being invoked returns a PhaseStatus // class CompilerPhaseWithStatus final : public Phase { public: CompilerPhaseWithStatus(Compiler* _compiler, Phases _phase, PhaseStatus (Compiler::*_action)()) : Phase(_compiler, _phase), action(_action) { } protected: virtual PhaseStatus DoPhase() override { return (comp->*action)(); } private: PhaseStatus (Compiler::*action)(); }; // Wrapper for using CompilePhaseWithStatus // inline void DoPhase(Compiler* _compiler, Phases _phase, PhaseStatus (Compiler::*_action)()) { CompilerPhaseWithStatus phase(_compiler, _phase, _action); phase.Run(); } #endif /* End of _PHASE_H_ */
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*****************************************************************************/ #ifndef _PHASE_H_ #define _PHASE_H_ // A phase encapsulates a part of the compilation pipeline for a method. // class Phase { // Observations made before a phase runs that should still // be true afterwards,if the phase status is MODIFIED_NOTHING. class Observations { public: Observations(Compiler* compiler); void Check(PhaseStatus status); private: #ifdef DEBUG Compiler* m_compiler; unsigned m_fgBBcount; unsigned m_fgBBNumMax; unsigned m_compHndBBtabCount; unsigned m_lvaCount; unsigned m_compGenTreeID; unsigned m_compStatementID; unsigned m_compBasicBlockID; #endif // DEBUG }; public: virtual void Run(); protected: Phase(Compiler* _compiler, Phases _phase) : comp(_compiler), m_name(nullptr), m_phase(_phase) { m_name = PhaseNames[_phase]; } virtual void PrePhase(); virtual PhaseStatus DoPhase() = 0; virtual void PostPhase(PhaseStatus status); Compiler* comp; const char* m_name; Phases m_phase; }; // A phase that accepts a lambda for the actions done by the phase. // template <typename A> class ActionPhase final : public Phase { public: ActionPhase(Compiler* _compiler, Phases _phase, A _action) : Phase(_compiler, _phase), action(_action) { } protected: virtual PhaseStatus DoPhase() override { action(); return PhaseStatus::MODIFIED_EVERYTHING; } private: A action; }; // Wrappers for using ActionPhase // template <typename A> void DoPhase(Compiler* _compiler, Phases _phase, A _action) { ActionPhase<A> phase(_compiler, _phase, _action); phase.Run(); } // A simple phase that just invokes a method on the compiler instance // class CompilerPhase final : public Phase { public: CompilerPhase(Compiler* _compiler, Phases _phase, void (Compiler::*_action)()) : Phase(_compiler, _phase), action(_action) { } protected: virtual PhaseStatus DoPhase() override { (comp->*action)(); return PhaseStatus::MODIFIED_EVERYTHING; } private: void (Compiler::*action)(); }; // Wrapper for using CompilePhase // inline void DoPhase(Compiler* _compiler, Phases _phase, void (Compiler::*_action)()) { CompilerPhase phase(_compiler, _phase, _action); phase.Run(); } // A simple phase that just invokes a method on the compiler instance // where the method being invoked returns a PhaseStatus // class CompilerPhaseWithStatus final : public Phase { public: CompilerPhaseWithStatus(Compiler* _compiler, Phases _phase, PhaseStatus (Compiler::*_action)()) : Phase(_compiler, _phase), action(_action) { } protected: virtual PhaseStatus DoPhase() override { return (comp->*action)(); } private: PhaseStatus (Compiler::*action)(); }; // Wrapper for using CompilePhaseWithStatus // inline void DoPhase(Compiler* _compiler, Phases _phase, PhaseStatus (Compiler::*_action)()) { CompilerPhaseWithStatus phase(_compiler, _phase, _action); phase.Run(); } #endif /* End of _PHASE_H_ */
-1
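The DoPhase overloads above all reduce to "invoke a member function through a pointer-to-member, then report a status"; here is a self-contained sketch of that pattern with invented MiniCompiler/MiniStatus names, not the real JIT types, to show how passes can be registered uniformly.

#include <cstdio>

enum class MiniStatus { MODIFIED_NOTHING, MODIFIED_EVERYTHING };

// Invented stand-in for the Compiler instance that owns the passes.
struct MiniCompiler
{
    MiniStatus DoImport() { std::puts("import"); return MiniStatus::MODIFIED_EVERYTHING; }
    MiniStatus DoMorph()  { std::puts("morph");  return MiniStatus::MODIFIED_NOTHING;    }
};

// Counterpart of DoPhase(Compiler*, Phases, PhaseStatus (Compiler::*)()):
// run one pass through a pointer-to-member and report whether it changed anything.
static void RunPhase(MiniCompiler* comp, const char* name, MiniStatus (MiniCompiler::*action)())
{
    MiniStatus status = (comp->*action)();
    std::printf("%s -> %s\n", name,
                status == MiniStatus::MODIFIED_EVERYTHING ? "modified" : "unchanged");
}

int main()
{
    MiniCompiler comp;
    RunPhase(&comp, "importer", &MiniCompiler::DoImport);
    RunPhase(&comp, "morph",    &MiniCompiler::DoMorph);
    return 0;
}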
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/tests/JIT/HardwareIntrinsics/X86/Sse1/CompareScalarOrderedEqual.Boolean.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.X86; namespace JIT.HardwareIntrinsics.X86 { public static partial class Program { private static void CompareScalarOrderedEqualBoolean() { var test = new BooleanBinaryOpTest__CompareScalarOrderedEqualBoolean(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (Sse.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); // Validates basic functionality works, using LoadAligned test.RunBasicScenario_LoadAligned(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (Sse.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); // Validates calling via reflection works, using LoadAligned test.RunReflectionScenario_LoadAligned(); } // Validates passing a static member works test.RunClsVarScenario(); if (Sse.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (Sse.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); // Validates passing a local works, using LoadAligned test.RunLclVarScenario_LoadAligned(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (Sse.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (Sse.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (Sse.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (Sse.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class BooleanBinaryOpTest__CompareScalarOrderedEqualBoolean { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private GCHandle inHandle1; private GCHandle inHandle2; private ulong alignment; public DataTable(Single[] inArray1, Single[] inArray2, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Single>(); int sizeOfinArray2 = 
inArray2.Length * Unsafe.SizeOf<Single>(); if ((alignment != 32 && alignment != 16) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Single, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Single, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Single> _fld1; public Vector128<Single> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref testStruct._fld1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Single>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref testStruct._fld2), ref Unsafe.As<Single, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Single>>()); return testStruct; } public void RunStructFldScenario(BooleanBinaryOpTest__CompareScalarOrderedEqualBoolean testClass) { var result = Sse.CompareScalarOrderedEqual(_fld1, _fld2); testClass.ValidateResult(_fld1, _fld2, result); } public void RunStructFldScenario_Load(BooleanBinaryOpTest__CompareScalarOrderedEqualBoolean testClass) { fixed (Vector128<Single>* pFld1 = &_fld1) fixed (Vector128<Single>* pFld2 = &_fld2) { var result = Sse.CompareScalarOrderedEqual( Sse.LoadVector128((Single*)(pFld1)), Sse.LoadVector128((Single*)(pFld2)) ); testClass.ValidateResult(_fld1, _fld2, result); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Single>>() / sizeof(Single); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<Single>>() / sizeof(Single); private static Single[] _data1 = new Single[Op1ElementCount]; private static Single[] _data2 = new Single[Op2ElementCount]; private static Vector128<Single> _clsVar1; private static Vector128<Single> _clsVar2; private Vector128<Single> _fld1; private Vector128<Single> _fld2; private DataTable _dataTable; static BooleanBinaryOpTest__CompareScalarOrderedEqualBoolean() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref _clsVar1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Single>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref _clsVar2), ref Unsafe.As<Single, byte>(ref _data2[0]), 
(uint)Unsafe.SizeOf<Vector128<Single>>()); } public BooleanBinaryOpTest__CompareScalarOrderedEqualBoolean() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref _fld1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Single>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref _fld2), ref Unsafe.As<Single, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Single>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } _dataTable = new DataTable(_data1, _data2, LargestVectorSize); } public bool IsSupported => Sse.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Sse.CompareScalarOrderedEqual( Unsafe.Read<Vector128<Single>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<Single>>(_dataTable.inArray2Ptr) ); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, result); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = Sse.CompareScalarOrderedEqual( Sse.LoadVector128((Single*)(_dataTable.inArray1Ptr)), Sse.LoadVector128((Single*)(_dataTable.inArray2Ptr)) ); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, result); } public void RunBasicScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_LoadAligned)); var result = Sse.CompareScalarOrderedEqual( Sse.LoadAlignedVector128((Single*)(_dataTable.inArray1Ptr)), Sse.LoadAlignedVector128((Single*)(_dataTable.inArray2Ptr)) ); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, result); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Sse).GetMethod(nameof(Sse.CompareScalarOrderedEqual), new Type[] { typeof(Vector128<Single>), typeof(Vector128<Single>) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<Single>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<Single>>(_dataTable.inArray2Ptr) }); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, (bool)(result)); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(Sse).GetMethod(nameof(Sse.CompareScalarOrderedEqual), new Type[] { typeof(Vector128<Single>), typeof(Vector128<Single>) }) .Invoke(null, new object[] { Sse.LoadVector128((Single*)(_dataTable.inArray1Ptr)), Sse.LoadVector128((Single*)(_dataTable.inArray2Ptr)) }); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, (bool)(result)); } public void RunReflectionScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_LoadAligned)); var result = typeof(Sse).GetMethod(nameof(Sse.CompareScalarOrderedEqual), new Type[] { typeof(Vector128<Single>), typeof(Vector128<Single>) }) .Invoke(null, new object[] { Sse.LoadAlignedVector128((Single*)(_dataTable.inArray1Ptr)), Sse.LoadAlignedVector128((Single*)(_dataTable.inArray2Ptr)) }); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, (bool)(result)); } public void 
RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Sse.CompareScalarOrderedEqual( _clsVar1, _clsVar2 ); ValidateResult(_clsVar1, _clsVar2, result); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<Single>* pClsVar1 = &_clsVar1) fixed (Vector128<Single>* pClsVar2 = &_clsVar2) { var result = Sse.CompareScalarOrderedEqual( Sse.LoadVector128((Single*)(pClsVar1)), Sse.LoadVector128((Single*)(pClsVar2)) ); ValidateResult(_clsVar1, _clsVar2, result); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<Single>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<Single>>(_dataTable.inArray2Ptr); var result = Sse.CompareScalarOrderedEqual(op1, op2); ValidateResult(op1, op2, result); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = Sse.LoadVector128((Single*)(_dataTable.inArray1Ptr)); var op2 = Sse.LoadVector128((Single*)(_dataTable.inArray2Ptr)); var result = Sse.CompareScalarOrderedEqual(op1, op2); ValidateResult(op1, op2, result); } public void RunLclVarScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_LoadAligned)); var op1 = Sse.LoadAlignedVector128((Single*)(_dataTable.inArray1Ptr)); var op2 = Sse.LoadAlignedVector128((Single*)(_dataTable.inArray2Ptr)); var result = Sse.CompareScalarOrderedEqual(op1, op2); ValidateResult(op1, op2, result); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new BooleanBinaryOpTest__CompareScalarOrderedEqualBoolean(); var result = Sse.CompareScalarOrderedEqual(test._fld1, test._fld2); ValidateResult(test._fld1, test._fld2, result); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new BooleanBinaryOpTest__CompareScalarOrderedEqualBoolean(); fixed (Vector128<Single>* pFld1 = &test._fld1) fixed (Vector128<Single>* pFld2 = &test._fld2) { var result = Sse.CompareScalarOrderedEqual( Sse.LoadVector128((Single*)(pFld1)), Sse.LoadVector128((Single*)(pFld2)) ); ValidateResult(test._fld1, test._fld2, result); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Sse.CompareScalarOrderedEqual(_fld1, _fld2); ValidateResult(_fld1, _fld2, result); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<Single>* pFld1 = &_fld1) fixed (Vector128<Single>* pFld2 = &_fld2) { var result = Sse.CompareScalarOrderedEqual( Sse.LoadVector128((Single*)(pFld1)), Sse.LoadVector128((Single*)(pFld2)) ); ValidateResult(_fld1, _fld2, result); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Sse.CompareScalarOrderedEqual(test._fld1, test._fld2); ValidateResult(test._fld1, test._fld2, result); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = Sse.CompareScalarOrderedEqual( Sse.LoadVector128((Single*)(&test._fld1)), Sse.LoadVector128((Single*)(&test._fld2)) ); ValidateResult(test._fld1, test._fld2, result); } 
public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Single> op1, Vector128<Single> op2, bool result, [CallerMemberName] string method = "") { Single[] inArray1 = new Single[Op1ElementCount]; Single[] inArray2 = new Single[Op2ElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Single, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<Single, byte>(ref inArray2[0]), op2); ValidateResult(inArray1, inArray2, result, method); } private void ValidateResult(void* op1, void* op2, bool result, [CallerMemberName] string method = "") { Single[] inArray1 = new Single[Op1ElementCount]; Single[] inArray2 = new Single[Op2ElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Single>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<Single>>()); ValidateResult(inArray1, inArray2, result, method); } private void ValidateResult(Single[] left, Single[] right, bool result, [CallerMemberName] string method = "") { bool succeeded = true; if ((left[0] == right[0]) != result) { succeeded = false; } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Sse)}.{nameof(Sse.CompareScalarOrderedEqual)}<Boolean>(Vector128<Single>, Vector128<Single>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({result})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.X86; namespace JIT.HardwareIntrinsics.X86 { public static partial class Program { private static void CompareScalarOrderedEqualBoolean() { var test = new BooleanBinaryOpTest__CompareScalarOrderedEqualBoolean(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (Sse.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); // Validates basic functionality works, using LoadAligned test.RunBasicScenario_LoadAligned(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (Sse.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); // Validates calling via reflection works, using LoadAligned test.RunReflectionScenario_LoadAligned(); } // Validates passing a static member works test.RunClsVarScenario(); if (Sse.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (Sse.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); // Validates passing a local works, using LoadAligned test.RunLclVarScenario_LoadAligned(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (Sse.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (Sse.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (Sse.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (Sse.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class BooleanBinaryOpTest__CompareScalarOrderedEqualBoolean { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private GCHandle inHandle1; private GCHandle inHandle2; private ulong alignment; public DataTable(Single[] inArray1, Single[] inArray2, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Single>(); int sizeOfinArray2 = 
inArray2.Length * Unsafe.SizeOf<Single>(); if ((alignment != 32 && alignment != 16) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Single, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Single, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Single> _fld1; public Vector128<Single> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref testStruct._fld1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Single>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref testStruct._fld2), ref Unsafe.As<Single, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Single>>()); return testStruct; } public void RunStructFldScenario(BooleanBinaryOpTest__CompareScalarOrderedEqualBoolean testClass) { var result = Sse.CompareScalarOrderedEqual(_fld1, _fld2); testClass.ValidateResult(_fld1, _fld2, result); } public void RunStructFldScenario_Load(BooleanBinaryOpTest__CompareScalarOrderedEqualBoolean testClass) { fixed (Vector128<Single>* pFld1 = &_fld1) fixed (Vector128<Single>* pFld2 = &_fld2) { var result = Sse.CompareScalarOrderedEqual( Sse.LoadVector128((Single*)(pFld1)), Sse.LoadVector128((Single*)(pFld2)) ); testClass.ValidateResult(_fld1, _fld2, result); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Single>>() / sizeof(Single); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<Single>>() / sizeof(Single); private static Single[] _data1 = new Single[Op1ElementCount]; private static Single[] _data2 = new Single[Op2ElementCount]; private static Vector128<Single> _clsVar1; private static Vector128<Single> _clsVar2; private Vector128<Single> _fld1; private Vector128<Single> _fld2; private DataTable _dataTable; static BooleanBinaryOpTest__CompareScalarOrderedEqualBoolean() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref _clsVar1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Single>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref _clsVar2), ref Unsafe.As<Single, byte>(ref _data2[0]), 
(uint)Unsafe.SizeOf<Vector128<Single>>()); } public BooleanBinaryOpTest__CompareScalarOrderedEqualBoolean() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref _fld1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Single>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref _fld2), ref Unsafe.As<Single, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Single>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } _dataTable = new DataTable(_data1, _data2, LargestVectorSize); } public bool IsSupported => Sse.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Sse.CompareScalarOrderedEqual( Unsafe.Read<Vector128<Single>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<Single>>(_dataTable.inArray2Ptr) ); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, result); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = Sse.CompareScalarOrderedEqual( Sse.LoadVector128((Single*)(_dataTable.inArray1Ptr)), Sse.LoadVector128((Single*)(_dataTable.inArray2Ptr)) ); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, result); } public void RunBasicScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_LoadAligned)); var result = Sse.CompareScalarOrderedEqual( Sse.LoadAlignedVector128((Single*)(_dataTable.inArray1Ptr)), Sse.LoadAlignedVector128((Single*)(_dataTable.inArray2Ptr)) ); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, result); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Sse).GetMethod(nameof(Sse.CompareScalarOrderedEqual), new Type[] { typeof(Vector128<Single>), typeof(Vector128<Single>) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<Single>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<Single>>(_dataTable.inArray2Ptr) }); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, (bool)(result)); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(Sse).GetMethod(nameof(Sse.CompareScalarOrderedEqual), new Type[] { typeof(Vector128<Single>), typeof(Vector128<Single>) }) .Invoke(null, new object[] { Sse.LoadVector128((Single*)(_dataTable.inArray1Ptr)), Sse.LoadVector128((Single*)(_dataTable.inArray2Ptr)) }); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, (bool)(result)); } public void RunReflectionScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_LoadAligned)); var result = typeof(Sse).GetMethod(nameof(Sse.CompareScalarOrderedEqual), new Type[] { typeof(Vector128<Single>), typeof(Vector128<Single>) }) .Invoke(null, new object[] { Sse.LoadAlignedVector128((Single*)(_dataTable.inArray1Ptr)), Sse.LoadAlignedVector128((Single*)(_dataTable.inArray2Ptr)) }); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, (bool)(result)); } public void 
RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Sse.CompareScalarOrderedEqual( _clsVar1, _clsVar2 ); ValidateResult(_clsVar1, _clsVar2, result); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<Single>* pClsVar1 = &_clsVar1) fixed (Vector128<Single>* pClsVar2 = &_clsVar2) { var result = Sse.CompareScalarOrderedEqual( Sse.LoadVector128((Single*)(pClsVar1)), Sse.LoadVector128((Single*)(pClsVar2)) ); ValidateResult(_clsVar1, _clsVar2, result); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<Single>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<Single>>(_dataTable.inArray2Ptr); var result = Sse.CompareScalarOrderedEqual(op1, op2); ValidateResult(op1, op2, result); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = Sse.LoadVector128((Single*)(_dataTable.inArray1Ptr)); var op2 = Sse.LoadVector128((Single*)(_dataTable.inArray2Ptr)); var result = Sse.CompareScalarOrderedEqual(op1, op2); ValidateResult(op1, op2, result); } public void RunLclVarScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_LoadAligned)); var op1 = Sse.LoadAlignedVector128((Single*)(_dataTable.inArray1Ptr)); var op2 = Sse.LoadAlignedVector128((Single*)(_dataTable.inArray2Ptr)); var result = Sse.CompareScalarOrderedEqual(op1, op2); ValidateResult(op1, op2, result); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new BooleanBinaryOpTest__CompareScalarOrderedEqualBoolean(); var result = Sse.CompareScalarOrderedEqual(test._fld1, test._fld2); ValidateResult(test._fld1, test._fld2, result); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new BooleanBinaryOpTest__CompareScalarOrderedEqualBoolean(); fixed (Vector128<Single>* pFld1 = &test._fld1) fixed (Vector128<Single>* pFld2 = &test._fld2) { var result = Sse.CompareScalarOrderedEqual( Sse.LoadVector128((Single*)(pFld1)), Sse.LoadVector128((Single*)(pFld2)) ); ValidateResult(test._fld1, test._fld2, result); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Sse.CompareScalarOrderedEqual(_fld1, _fld2); ValidateResult(_fld1, _fld2, result); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<Single>* pFld1 = &_fld1) fixed (Vector128<Single>* pFld2 = &_fld2) { var result = Sse.CompareScalarOrderedEqual( Sse.LoadVector128((Single*)(pFld1)), Sse.LoadVector128((Single*)(pFld2)) ); ValidateResult(_fld1, _fld2, result); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Sse.CompareScalarOrderedEqual(test._fld1, test._fld2); ValidateResult(test._fld1, test._fld2, result); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = Sse.CompareScalarOrderedEqual( Sse.LoadVector128((Single*)(&test._fld1)), Sse.LoadVector128((Single*)(&test._fld2)) ); ValidateResult(test._fld1, test._fld2, result); } 
public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Single> op1, Vector128<Single> op2, bool result, [CallerMemberName] string method = "") { Single[] inArray1 = new Single[Op1ElementCount]; Single[] inArray2 = new Single[Op2ElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Single, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<Single, byte>(ref inArray2[0]), op2); ValidateResult(inArray1, inArray2, result, method); } private void ValidateResult(void* op1, void* op2, bool result, [CallerMemberName] string method = "") { Single[] inArray1 = new Single[Op1ElementCount]; Single[] inArray2 = new Single[Op2ElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Single>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<Single>>()); ValidateResult(inArray1, inArray2, result, method); } private void ValidateResult(Single[] left, Single[] right, bool result, [CallerMemberName] string method = "") { bool succeeded = true; if ((left[0] == right[0]) != result) { succeeded = false; } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Sse)}.{nameof(Sse.CompareScalarOrderedEqual)}<Boolean>(Vector128<Single>, Vector128<Single>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({result})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
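The description above concerns the RyuJIT loop-cloning optimization. Below is a minimal C# sketch, under stated assumptions, of the loop shape it mentions: an induction variable initialized from an arbitrary expression rather than a constant or a local. The class, field, and method names (CloningExample, _offset, Start, Sum) are hypothetical and exist only to illustrate the pattern; the cloning itself is performed inside the JIT, not in user code.

// Illustrative sketch only: the shape of loop the PR description refers to,
// where the induction variable's initializer is an arbitrary expression
// (here a field read plus a method call) rather than a constant or local.
// All names below are hypothetical examples, not APIs from the JIT or the
// runtime test suite.
using System;

public class CloningExample
{
    private int _offset = 1;

    private int Start() => _offset + 1;

    // Per the description above, loops like this become cloning candidates
    // even though `i` starts at `Start()` rather than at a constant: the JIT
    // may emit a fast path that elides bounds checks plus a slow path guarded
    // by runtime checks (including a check against zero when the initial
    // value is not known to be non-negative).
    public int Sum(int[] data)
    {
        int sum = 0;
        for (int i = Start(); i < data.Length; i++)
        {
            sum += data[i];
        }
        return sum;
    }

    public static void Main()
    {
        var example = new CloningExample();
        Console.WriteLine(example.Sum(new[] { 10, 20, 30, 40 })); // prints 90
    }
}

Run as-is this simply prints 90; whether the JIT actually clones the loop depends on the runtime version and tiering, so treat it purely as an illustration of the `for (i = expression...` case named in the description, not as a reproduction of the compiler change.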
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/libraries/System.ComponentModel.Composition/tests/System/ComponentModel/Composition/Factories/CatalogFactory.DisposableComposablePartCatalog.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.ComponentModel.Composition.Primitives; using System.Linq; using Xunit; namespace System.ComponentModel.Composition.Factories { partial class CatalogFactory { // NOTE: Do not add any more behavior to this class, as ComposablePartCatalogTests.cs // uses this to verify default behavior of the base class. private class DisposableComposablePartCatalog : ComposablePartCatalog { private readonly Action<bool> _disposeCallback; public DisposableComposablePartCatalog(Action<bool> disposeCallback) { Assert.NotNull(disposeCallback); _disposeCallback = disposeCallback; } ~DisposableComposablePartCatalog() { Dispose(false); } public override IQueryable<ComposablePartDefinition> Parts { get { throw new NotImplementedException(); } } protected override void Dispose(bool disposing) { _disposeCallback(disposing); base.Dispose(disposing); } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.ComponentModel.Composition.Primitives; using System.Linq; using Xunit; namespace System.ComponentModel.Composition.Factories { partial class CatalogFactory { // NOTE: Do not add any more behavior to this class, as ComposablePartCatalogTests.cs // uses this to verify default behavior of the base class. private class DisposableComposablePartCatalog : ComposablePartCatalog { private readonly Action<bool> _disposeCallback; public DisposableComposablePartCatalog(Action<bool> disposeCallback) { Assert.NotNull(disposeCallback); _disposeCallback = disposeCallback; } ~DisposableComposablePartCatalog() { Dispose(false); } public override IQueryable<ComposablePartDefinition> Parts { get { throw new NotImplementedException(); } } protected override void Dispose(bool disposing) { _disposeCallback(disposing); base.Dispose(disposing); } } } }
-1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/tests/Loader/binding/assemblies/generics/arilistienum/methods/methods.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //array<T> IList properties using System; using System.IO; using System.Reflection; using System.Collections; public class GenClass<T> { public T fld; } public class PropsArIList { public static int Main() { int result = 0; int i, index = 0; bool bRes = false; try { //Part 1 - GenClass <int> Console.WriteLine("\ntest GenClass<int>"); GenClass<int> obj1; obj1 = new GenClass<int>(); obj1.fld = 3; Console.WriteLine (obj1.fld); GenClass<int>[] arGen; arGen = new GenClass<int>[5]; for (i=0;i<5;i++) { arGen[i] = new GenClass<int>(); arGen[i].fld = i; Console.Write (arGen[i].fld + "\t"); } Console.WriteLine(); IList interf1 = (IList) arGen; Console.WriteLine ("testing IList.Contains"); bRes = interf1.Contains (arGen[2]); if (bRes!=true) { Console.WriteLine ("unexpected result: {0} \n test failed", bRes); return 110; } bRes = interf1.Contains (obj1); if (bRes!=false) { Console.WriteLine ("unexpected result: {0} \n test failed", bRes); return 110; } Console.WriteLine ("testing IList.IndexOf"); index = interf1.IndexOf (arGen[2]); if (index!=2) { Console.WriteLine ("unexpected result: {0} \n test failed", index); return 110; } Console.WriteLine ("testing IList.Clear"); interf1.Clear(); for (i=0;i<5;i++) { if (arGen[i]!=null) { Console.WriteLine ("unexpected result: element {0} is not null \n test failed", i); return 110; } } //Part 2 - GenClass <string> Console.WriteLine("\ntest GenClass<string>"); GenClass<string> obj2; obj2 = new GenClass<string>(); obj2.fld = "name"; Console.WriteLine (obj2.fld); GenClass<string>[] arGenS; arGenS = new GenClass<string>[5]; string aux = "none"; for (i=0;i<5;i++) { arGenS[i] = new GenClass<string>(); aux = Convert.ToString(i); arGenS[i].fld = aux; Console.Write (arGenS[i].fld + "\t"); } Console.WriteLine(); IList interf2 = (IList) arGenS; Console.WriteLine ("testing IList.Contains"); bRes = interf2.Contains (arGenS[2]); if (bRes!=true) { Console.WriteLine ("unexpected result: {0} \n test failed", bRes); return 110; } bRes = interf2.Contains (obj2); if (bRes!=false) { Console.WriteLine ("unexpected result: {0} \n test failed", bRes); return 110; } bRes = interf2.Contains (obj1); if (bRes!=false) { Console.WriteLine ("unexpected result: {0} \n test failed", bRes); return 110; } Console.WriteLine ("testing IList.IndexOf"); index = interf2.IndexOf (arGenS[2]); if (index!=2) { Console.WriteLine ("unexpected result: {0} \n test failed", index); return 110; } Console.WriteLine ("testing IList.Clear"); interf2.Clear(); for (i=0;i<5;i++) { if (arGenS[i]!=null) { Console.WriteLine ("unexpected result: element {0} is not null \n test failed", i); return 110; } } result = 100; //pass } catch (Exception e) { Console.WriteLine ("unexpected exception.."); Console.WriteLine (e); Console.WriteLine ("test failed"); return 101; } if (result==100) Console.WriteLine ("test passed"); else Console.WriteLine ("test failed"); return result; } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //array<T> IList properties using System; using System.IO; using System.Reflection; using System.Collections; public class GenClass<T> { public T fld; } public class PropsArIList { public static int Main() { int result = 0; int i, index = 0; bool bRes = false; try { //Part 1 - GenClass <int> Console.WriteLine("\ntest GenClass<int>"); GenClass<int> obj1; obj1 = new GenClass<int>(); obj1.fld = 3; Console.WriteLine (obj1.fld); GenClass<int>[] arGen; arGen = new GenClass<int>[5]; for (i=0;i<5;i++) { arGen[i] = new GenClass<int>(); arGen[i].fld = i; Console.Write (arGen[i].fld + "\t"); } Console.WriteLine(); IList interf1 = (IList) arGen; Console.WriteLine ("testing IList.Contains"); bRes = interf1.Contains (arGen[2]); if (bRes!=true) { Console.WriteLine ("unexpected result: {0} \n test failed", bRes); return 110; } bRes = interf1.Contains (obj1); if (bRes!=false) { Console.WriteLine ("unexpected result: {0} \n test failed", bRes); return 110; } Console.WriteLine ("testing IList.IndexOf"); index = interf1.IndexOf (arGen[2]); if (index!=2) { Console.WriteLine ("unexpected result: {0} \n test failed", index); return 110; } Console.WriteLine ("testing IList.Clear"); interf1.Clear(); for (i=0;i<5;i++) { if (arGen[i]!=null) { Console.WriteLine ("unexpected result: element {0} is not null \n test failed", i); return 110; } } //Part 2 - GenClass <string> Console.WriteLine("\ntest GenClass<string>"); GenClass<string> obj2; obj2 = new GenClass<string>(); obj2.fld = "name"; Console.WriteLine (obj2.fld); GenClass<string>[] arGenS; arGenS = new GenClass<string>[5]; string aux = "none"; for (i=0;i<5;i++) { arGenS[i] = new GenClass<string>(); aux = Convert.ToString(i); arGenS[i].fld = aux; Console.Write (arGenS[i].fld + "\t"); } Console.WriteLine(); IList interf2 = (IList) arGenS; Console.WriteLine ("testing IList.Contains"); bRes = interf2.Contains (arGenS[2]); if (bRes!=true) { Console.WriteLine ("unexpected result: {0} \n test failed", bRes); return 110; } bRes = interf2.Contains (obj2); if (bRes!=false) { Console.WriteLine ("unexpected result: {0} \n test failed", bRes); return 110; } bRes = interf2.Contains (obj1); if (bRes!=false) { Console.WriteLine ("unexpected result: {0} \n test failed", bRes); return 110; } Console.WriteLine ("testing IList.IndexOf"); index = interf2.IndexOf (arGenS[2]); if (index!=2) { Console.WriteLine ("unexpected result: {0} \n test failed", index); return 110; } Console.WriteLine ("testing IList.Clear"); interf2.Clear(); for (i=0;i<5;i++) { if (arGenS[i]!=null) { Console.WriteLine ("unexpected result: element {0} is not null \n test failed", i); return 110; } } result = 100; //pass } catch (Exception e) { Console.WriteLine ("unexpected exception.."); Console.WriteLine (e); Console.WriteLine ("test failed"); return 101; } if (result==100) Console.WriteLine ("test passed"); else Console.WriteLine ("test failed"); return result; } }
-1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/tests/JIT/HardwareIntrinsics/General/Vector128_1/op_Subtraction.SByte.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void op_SubtractionSByte() { var test = new VectorBinaryOpTest__op_SubtractionSByte(); // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorBinaryOpTest__op_SubtractionSByte { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(SByte[] inArray1, SByte[] inArray2, SByte[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<SByte>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<SByte>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<SByte>(); if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<SByte, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<SByte, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return 
(void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<SByte> _fld1; public Vector128<SByte> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref testStruct._fld1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref testStruct._fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); return testStruct; } public void RunStructFldScenario(VectorBinaryOpTest__op_SubtractionSByte testClass) { var result = _fld1 - _fld2; Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<SByte>>() / sizeof(SByte); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<SByte>>() / sizeof(SByte); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<SByte>>() / sizeof(SByte); private static SByte[] _data1 = new SByte[Op1ElementCount]; private static SByte[] _data2 = new SByte[Op2ElementCount]; private static Vector128<SByte> _clsVar1; private static Vector128<SByte> _clsVar2; private Vector128<SByte> _fld1; private Vector128<SByte> _fld2; private DataTable _dataTable; static VectorBinaryOpTest__op_SubtractionSByte() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _clsVar1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _clsVar2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); } public VectorBinaryOpTest__op_SubtractionSByte() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _fld1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } _dataTable = new DataTable(_data1, _data2, new SByte[RetElementCount], LargestVectorSize); } public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Unsafe.Read<Vector128<SByte>>(_dataTable.inArray1Ptr) - Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void 
RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Vector128<SByte>).GetMethod("op_Subtraction", new Type[] { typeof(Vector128<SByte>), typeof(Vector128<SByte>) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<SByte>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<SByte>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = _clsVar1 - _clsVar2; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<SByte>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr); var result = op1 - op2; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new VectorBinaryOpTest__op_SubtractionSByte(); var result = test._fld1 - test._fld2; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = _fld1 - _fld2; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = test._fld1 - test._fld2; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } private void ValidateResult(Vector128<SByte> op1, Vector128<SByte> op2, void* result, [CallerMemberName] string method = "") { SByte[] inArray1 = new SByte[Op1ElementCount]; SByte[] inArray2 = new SByte[Op2ElementCount]; SByte[] outArray = new SByte[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<SByte>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { SByte[] inArray1 = new SByte[Op1ElementCount]; SByte[] inArray2 = new SByte[Op2ElementCount]; SByte[] outArray = new SByte[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<SByte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<SByte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<SByte>>()); ValidateResult(inArray1, inArray2, outArray, method); } private 
void ValidateResult(SByte[] left, SByte[] right, SByte[] result, [CallerMemberName] string method = "") { bool succeeded = true; if (result[0] != (sbyte)(left[0] - right[0])) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if (result[i] != (sbyte)(left[i] - right[i])) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Vector128)}.op_Subtraction<SByte>(Vector128<SByte>, Vector128<SByte>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void op_SubtractionSByte() { var test = new VectorBinaryOpTest__op_SubtractionSByte(); // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorBinaryOpTest__op_SubtractionSByte { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(SByte[] inArray1, SByte[] inArray2, SByte[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<SByte>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<SByte>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<SByte>(); if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<SByte, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<SByte, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return 
(void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<SByte> _fld1; public Vector128<SByte> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref testStruct._fld1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref testStruct._fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); return testStruct; } public void RunStructFldScenario(VectorBinaryOpTest__op_SubtractionSByte testClass) { var result = _fld1 - _fld2; Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<SByte>>() / sizeof(SByte); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<SByte>>() / sizeof(SByte); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<SByte>>() / sizeof(SByte); private static SByte[] _data1 = new SByte[Op1ElementCount]; private static SByte[] _data2 = new SByte[Op2ElementCount]; private static Vector128<SByte> _clsVar1; private static Vector128<SByte> _clsVar2; private Vector128<SByte> _fld1; private Vector128<SByte> _fld2; private DataTable _dataTable; static VectorBinaryOpTest__op_SubtractionSByte() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _clsVar1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _clsVar2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); } public VectorBinaryOpTest__op_SubtractionSByte() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _fld1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } _dataTable = new DataTable(_data1, _data2, new SByte[RetElementCount], LargestVectorSize); } public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Unsafe.Read<Vector128<SByte>>(_dataTable.inArray1Ptr) - Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void 
RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Vector128<SByte>).GetMethod("op_Subtraction", new Type[] { typeof(Vector128<SByte>), typeof(Vector128<SByte>) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<SByte>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<SByte>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = _clsVar1 - _clsVar2; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<SByte>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr); var result = op1 - op2; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new VectorBinaryOpTest__op_SubtractionSByte(); var result = test._fld1 - test._fld2; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = _fld1 - _fld2; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = test._fld1 - test._fld2; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } private void ValidateResult(Vector128<SByte> op1, Vector128<SByte> op2, void* result, [CallerMemberName] string method = "") { SByte[] inArray1 = new SByte[Op1ElementCount]; SByte[] inArray2 = new SByte[Op2ElementCount]; SByte[] outArray = new SByte[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<SByte>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { SByte[] inArray1 = new SByte[Op1ElementCount]; SByte[] inArray2 = new SByte[Op2ElementCount]; SByte[] outArray = new SByte[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<SByte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<SByte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<SByte>>()); ValidateResult(inArray1, inArray2, outArray, method); } private 
void ValidateResult(SByte[] left, SByte[] right, SByte[] result, [CallerMemberName] string method = "") { bool succeeded = true; if (result[0] != (sbyte)(left[0] - right[0])) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if (result[i] != (sbyte)(left[i] - right[i])) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Vector128)}.op_Subtraction<SByte>(Vector128<SByte>, Vector128<SByte>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/tests/JIT/Regression/CLR-x86-JIT/V1.1-M1-Beta1/b140118/b140118.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).il" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).il" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/libraries/System.Linq.Expressions/src/System/Linq/Expressions/UnaryExpression.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Dynamic.Utils; using System.Reflection; using System.Runtime.CompilerServices; namespace System.Linq.Expressions { /// <summary> /// Represents an expression that has a unary operator. /// </summary> [DebuggerTypeProxy(typeof(UnaryExpressionProxy))] public sealed class UnaryExpression : Expression { internal UnaryExpression(ExpressionType nodeType, Expression expression, Type type, MethodInfo? method) { Operand = expression; Method = method; NodeType = nodeType; Type = type; } /// <summary> /// Gets the static type of the expression that this <see cref="Expression"/> represents. (Inherited from <see cref="Expression"/>.) /// </summary> /// <returns>The <see cref="System.Type"/> that represents the static type of the expression.</returns> public sealed override Type Type { get; } /// <summary> /// Returns the node type of this <see cref="Expression"/>. (Inherited from <see cref="Expression"/>.) /// </summary> /// <returns>The <see cref="ExpressionType"/> that represents this expression.</returns> public sealed override ExpressionType NodeType { get; } /// <summary> /// Gets the operand of the unary operation. /// </summary> /// <returns> An <see cref="Expression"/> that represents the operand of the unary operation. Returns null if node type is <see cref="ExpressionType.Throw"/> with no operand.</returns> public Expression Operand { get; } /// <summary> /// Gets the implementing method for the unary operation. /// </summary> /// <returns>The <see cref="MethodInfo"/> that represents the implementing method.</returns> public MethodInfo? Method { get; } /// <summary> /// Gets a value that indicates whether the expression tree node represents a lifted call to an operator. /// </summary> /// <returns>true if the node represents a lifted call; otherwise, false.</returns> public bool IsLifted { get { if (NodeType == ExpressionType.TypeAs || NodeType == ExpressionType.Quote || NodeType == ExpressionType.Throw) { return false; } bool operandIsNullable = Operand.Type.IsNullableType(); bool resultIsNullable = this.Type.IsNullableType(); if (Method != null) { return (operandIsNullable && !TypeUtils.AreEquivalent(Method.GetParametersCached()[0].ParameterType, Operand.Type)) || (resultIsNullable && !TypeUtils.AreEquivalent(Method.ReturnType, this.Type)); } return operandIsNullable || resultIsNullable; } } /// <summary> /// Gets a value that indicates whether the expression tree node represents a lifted call to an operator whose return type is lifted to a nullable type. /// </summary> /// <returns>true if the operator's return type is lifted to a nullable type; otherwise, false.</returns> public bool IsLiftedToNull => IsLifted && this.Type.IsNullableType(); /// <summary> /// Dispatches to the specific visit method for this node type. /// </summary> protected internal override Expression Accept(ExpressionVisitor visitor) { return visitor.VisitUnary(this); } /// <summary> /// Gets a value that indicates whether the expression tree node can be reduced. /// </summary> public override bool CanReduce { get { switch (NodeType) { case ExpressionType.PreIncrementAssign: case ExpressionType.PreDecrementAssign: case ExpressionType.PostIncrementAssign: case ExpressionType.PostDecrementAssign: return true; } return false; } } /// <summary> /// Reduces the expression node to a simpler expression. 
/// If CanReduce returns true, this should return a valid expression. /// This method is allowed to return another node which itself /// must be reduced. /// </summary> /// <returns>The reduced expression.</returns> public override Expression Reduce() { if (CanReduce) { switch (Operand.NodeType) { case ExpressionType.Index: return ReduceIndex(); case ExpressionType.MemberAccess: return ReduceMember(); default: Debug.Assert(Operand.NodeType == ExpressionType.Parameter); return ReduceVariable(); } } return this; } private bool IsPrefix { get { return NodeType == ExpressionType.PreIncrementAssign || NodeType == ExpressionType.PreDecrementAssign; } } private UnaryExpression FunctionalOp(Expression operand) { ExpressionType functional; if (NodeType == ExpressionType.PreIncrementAssign || NodeType == ExpressionType.PostIncrementAssign) { functional = ExpressionType.Increment; } else { Debug.Assert(NodeType == ExpressionType.PreDecrementAssign || NodeType == ExpressionType.PostDecrementAssign); functional = ExpressionType.Decrement; } return new UnaryExpression(functional, operand, operand.Type, Method); } private Expression ReduceVariable() { if (IsPrefix) { // (op) var // ... is reduced into ... // var = op(var) return Assign(Operand, FunctionalOp(Operand)); } // var (op) // ... is reduced into ... // temp = var // var = op(var) // temp ParameterExpression temp = Parameter(Operand.Type, name: null); return Block( new TrueReadOnlyCollection<ParameterExpression>(temp), new TrueReadOnlyCollection<Expression>( Assign(temp, Operand), Assign(Operand, FunctionalOp(temp)), temp ) ); } private Expression ReduceMember() { var member = (MemberExpression)Operand; if (member.Expression == null) { //static member, reduce the same as variable return ReduceVariable(); } else { ParameterExpression temp1 = Parameter(member.Expression.Type, name: null); BinaryExpression initTemp1 = Assign(temp1, member.Expression); member = MakeMemberAccess(temp1, member.Member); if (IsPrefix) { // (op) value.member // ... is reduced into ... // temp1 = value // temp1.member = op(temp1.member) return Block( new TrueReadOnlyCollection<ParameterExpression>(temp1), new TrueReadOnlyCollection<Expression>( initTemp1, Assign(member, FunctionalOp(member)) ) ); } // value.member (op) // ... is reduced into ... // temp1 = value // temp2 = temp1.member // temp1.member = op(temp2) // temp2 ParameterExpression temp2 = Parameter(member.Type, name: null); return Block( new TrueReadOnlyCollection<ParameterExpression>(temp1, temp2), new TrueReadOnlyCollection<Expression>( initTemp1, Assign(temp2, member), Assign(member, FunctionalOp(temp2)), temp2 ) ); } } private Expression ReduceIndex() { // left[a0, a1, ... aN] (op) // // ... is reduced into ... // // tempObj = left // tempArg0 = a0 // ... // tempArgN = aN // tempValue = tempObj[tempArg0, ... tempArgN] // tempObj[tempArg0, ... tempArgN] = op(tempValue) // tempValue bool prefix = IsPrefix; var index = (IndexExpression)Operand; int count = index.ArgumentCount; var block = new Expression[count + (prefix ? 2 : 4)]; var temps = new ParameterExpression[count + (prefix ? 
1 : 2)]; var args = new ParameterExpression[count]; int i = 0; temps[i] = Parameter(index.Object!.Type, name: null); block[i] = Assign(temps[i], index.Object); i++; while (i <= count) { Expression arg = index.GetArgument(i - 1); args[i - 1] = temps[i] = Parameter(arg.Type, name: null); block[i] = Assign(temps[i], arg); i++; } index = MakeIndex(temps[0], index.Indexer, new TrueReadOnlyCollection<Expression>(args)); if (!prefix) { ParameterExpression lastTemp = temps[i] = Parameter(index.Type, name: null); block[i] = Assign(temps[i], index); i++; Debug.Assert(i == temps.Length); block[i++] = Assign(index, FunctionalOp(lastTemp)); block[i++] = lastTemp; } else { Debug.Assert(i == temps.Length); block[i++] = Assign(index, FunctionalOp(index)); } Debug.Assert(i == block.Length); return Block(new TrueReadOnlyCollection<ParameterExpression>(temps), new TrueReadOnlyCollection<Expression>(block)); } /// <summary> /// Creates a new expression that is like this one, but using the /// supplied children. If all of the children are the same, it will /// return this expression. /// </summary> /// <param name="operand">The <see cref="Operand"/> property of the result.</param> /// <returns>This expression if no children changed, or an expression with the updated children.</returns> public UnaryExpression Update(Expression operand) { if (operand == Operand) { return this; } return Expression.MakeUnary(NodeType, operand, Type, Method); } } public partial class Expression { /// <summary> /// Creates a <see cref="UnaryExpression"/>, given an operand, by calling the appropriate factory method. /// </summary> /// <param name="unaryType">The <see cref="ExpressionType"/> that specifies the type of unary operation.</param> /// <param name="operand">An <see cref="Expression"/> that represents the operand.</param> /// <param name="type">The <see cref="Type"/> that specifies the type to be converted to (pass null if not applicable).</param> /// <returns>The <see cref="UnaryExpression"/> that results from calling the appropriate factory method.</returns> /// <exception cref="ArgumentException">Thrown when <paramref name="unaryType"/> does not correspond to a unary expression.</exception> /// <exception cref="ArgumentNullException">Thrown when <paramref name="operand"/> is null.</exception> public static UnaryExpression MakeUnary(ExpressionType unaryType, Expression operand, Type type) { return MakeUnary(unaryType, operand, type, method: null); } /// <summary> /// Creates a <see cref="UnaryExpression"/>, given an operand and implementing method, by calling the appropriate factory method. /// </summary> /// <param name="unaryType">The <see cref="ExpressionType"/> that specifies the type of unary operation.</param> /// <param name="operand">An <see cref="Expression"/> that represents the operand.</param> /// <param name="type">The <see cref="Type"/> that specifies the type to be converted to (pass null if not applicable).</param> /// <param name="method">The <see cref="MethodInfo"/> that represents the implementing method.</param> /// <returns>The <see cref="UnaryExpression"/> that results from calling the appropriate factory method.</returns> /// <exception cref="ArgumentException">Thrown when <paramref name="unaryType"/> does not correspond to a unary expression.</exception> /// <exception cref="ArgumentNullException">Thrown when <paramref name="operand"/> is null.</exception> public static UnaryExpression MakeUnary(ExpressionType unaryType, Expression operand, Type type, MethodInfo? 
method) => unaryType switch { ExpressionType.Negate => Negate(operand, method), ExpressionType.NegateChecked => NegateChecked(operand, method), ExpressionType.Not => Not(operand, method), ExpressionType.IsFalse => IsFalse(operand, method), ExpressionType.IsTrue => IsTrue(operand, method), ExpressionType.OnesComplement => OnesComplement(operand, method), ExpressionType.ArrayLength => ArrayLength(operand), ExpressionType.Convert => Convert(operand, type, method), ExpressionType.ConvertChecked => ConvertChecked(operand, type, method), ExpressionType.Throw => Throw(operand, type), ExpressionType.TypeAs => TypeAs(operand, type), ExpressionType.Quote => Quote(operand), ExpressionType.UnaryPlus => UnaryPlus(operand, method), ExpressionType.Unbox => Unbox(operand, type), ExpressionType.Increment => Increment(operand, method), ExpressionType.Decrement => Decrement(operand, method), ExpressionType.PreIncrementAssign => PreIncrementAssign(operand, method), ExpressionType.PostIncrementAssign => PostIncrementAssign(operand, method), ExpressionType.PreDecrementAssign => PreDecrementAssign(operand, method), ExpressionType.PostDecrementAssign => PostDecrementAssign(operand, method), _ => throw Error.UnhandledUnary(unaryType, nameof(unaryType)), }; private static UnaryExpression GetUserDefinedUnaryOperatorOrThrow(ExpressionType unaryType, string name, Expression operand) { UnaryExpression? u = GetUserDefinedUnaryOperator(unaryType, name, operand); if (u != null) { ValidateParamswithOperandsOrThrow(u.Method!.GetParametersCached()[0].ParameterType, operand.Type, unaryType, name); return u; } throw Error.UnaryOperatorNotDefined(unaryType, operand.Type); } [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2072:UnrecognizedReflectionPattern", Justification = "The trimmer doesn't remove operators when System.Linq.Expressions is used. See https://github.com/mono/linker/pull/2125.")] private static UnaryExpression? GetUserDefinedUnaryOperator(ExpressionType unaryType, string name, Expression operand) { Type operandType = operand.Type; Type[] types = new Type[] { operandType }; Type nnOperandType = operandType.GetNonNullableType(); MethodInfo? 
method = nnOperandType.GetAnyStaticMethodValidated(name, types); if (method != null) { return new UnaryExpression(unaryType, operand, method.ReturnType, method); } // try lifted call if (operandType.IsNullableType()) { types[0] = nnOperandType; method = nnOperandType.GetAnyStaticMethodValidated(name, types); if (method != null && method.ReturnType.IsValueType && !method.ReturnType.IsNullableType()) { return new UnaryExpression(unaryType, operand, method.ReturnType.GetNullableType(), method); } } return null; } private static UnaryExpression GetMethodBasedUnaryOperator(ExpressionType unaryType, Expression operand, MethodInfo method) { Debug.Assert(method != null); ValidateOperator(method); ParameterInfo[] pms = method.GetParametersCached(); if (pms.Length != 1) throw Error.IncorrectNumberOfMethodCallArguments(method, nameof(method)); if (ParameterIsAssignable(pms[0], operand.Type)) { ValidateParamswithOperandsOrThrow(pms[0].ParameterType, operand.Type, unaryType, method.Name); return new UnaryExpression(unaryType, operand, method.ReturnType, method); } // check for lifted call if (operand.Type.IsNullableType() && ParameterIsAssignable(pms[0], operand.Type.GetNonNullableType()) && method.ReturnType.IsValueType && !method.ReturnType.IsNullableType()) { return new UnaryExpression(unaryType, operand, method.ReturnType.GetNullableType(), method); } throw Error.OperandTypesDoNotMatchParameters(unaryType, method.Name); } private static UnaryExpression GetUserDefinedCoercionOrThrow(ExpressionType coercionType, Expression expression, Type convertToType) { UnaryExpression? u = GetUserDefinedCoercion(coercionType, expression, convertToType); if (u != null) { return u; } throw Error.CoercionOperatorNotDefined(expression.Type, convertToType); } private static UnaryExpression? GetUserDefinedCoercion(ExpressionType coercionType, Expression expression, Type convertToType) { MethodInfo? method = TypeUtils.GetUserDefinedCoercionMethod(expression.Type, convertToType); if (method != null) { return new UnaryExpression(coercionType, expression, convertToType, method); } else { return null; } } private static UnaryExpression GetMethodBasedCoercionOperator(ExpressionType unaryType, Expression operand, Type convertToType, MethodInfo method) { Debug.Assert(method != null); ValidateOperator(method); ParameterInfo[] pms = method.GetParametersCached(); if (pms.Length != 1) { throw Error.IncorrectNumberOfMethodCallArguments(method, nameof(method)); } if (ParameterIsAssignable(pms[0], operand.Type) && TypeUtils.AreEquivalent(method.ReturnType, convertToType)) { return new UnaryExpression(unaryType, operand, method.ReturnType, method); } // check for lifted call if ((operand.Type.IsNullableType() || convertToType.IsNullableType()) && ParameterIsAssignable(pms[0], operand.Type.GetNonNullableType()) && (TypeUtils.AreEquivalent(method.ReturnType, convertToType.GetNonNullableType()) || TypeUtils.AreEquivalent(method.ReturnType, convertToType))) { return new UnaryExpression(unaryType, operand, convertToType, method); } throw Error.OperandTypesDoNotMatchParameters(unaryType, method.Name); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents an arithmetic negation operation. 
/// </summary> /// <param name="expression">An <see cref="Expression"/> to set the <see cref="UnaryExpression.Operand"/> property equal to.</param> /// <returns>A <see cref="UnaryExpression"/> that has the <see cref="NodeType"/> property equal to <see cref="ExpressionType.Negate"/> and the <see cref="UnaryExpression.Operand"/> property set to the specified value.</returns> /// <exception cref="ArgumentNullException">Thrown when <paramref name="expression"/> is null.</exception> /// <exception cref="InvalidOperationException">Thrown when the unary minus operator is not defined for <paramref name="expression"/>.Type.</exception> public static UnaryExpression Negate(Expression expression) { return Negate(expression, method: null); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents an arithmetic negation operation. /// </summary> /// <param name="expression">An <see cref="Expression"/> to set the <see cref="UnaryExpression.Operand"/> property equal to.</param> /// <param name="method">A <see cref="MethodInfo"/> to set the <see cref="UnaryExpression.Method"/> property equal to.</param> /// <returns>A <see cref="UnaryExpression"/> that has the <see cref="NodeType"/> property equal to <see cref="ExpressionType.Negate"/> and the <see cref="UnaryExpression.Operand"/> and <see cref="UnaryExpression.Method"/> properties set to the specified values.</returns> /// <exception cref="ArgumentNullException">Thrown when <paramref name="expression"/> is null.</exception> /// <exception cref="ArgumentException">Thrown when <paramref name="method"/> is not null and the method it represents returns void, is not static (Shared in Visual Basic), or does not take exactly one argument.</exception> /// <exception cref="InvalidOperationException">Thrown when <paramref name="method"/> is null and the unary minus operator is not defined for <paramref name="expression"/>.Type.-or-<paramref name="expression"/>.Type (or its corresponding non-nullable type if it is a nullable value type) is not assignable to the argument type of the method represented by <paramref name="method"/>.</exception> public static UnaryExpression Negate(Expression expression, MethodInfo? method) { ExpressionUtils.RequiresCanRead(expression, nameof(expression)); if (method == null) { if (expression.Type.IsArithmetic() && !expression.Type.IsUnsignedInt()) { return new UnaryExpression(ExpressionType.Negate, expression, expression.Type, null); } return GetUserDefinedUnaryOperatorOrThrow(ExpressionType.Negate, "op_UnaryNegation", expression); } return GetMethodBasedUnaryOperator(ExpressionType.Negate, expression, method); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents a unary plus operation. /// </summary> /// <param name="expression">An <see cref="Expression"/> to set the <see cref="UnaryExpression.Operand"/> property equal to.</param> /// <returns>A <see cref="UnaryExpression"/> that has the <see cref="NodeType"/> property equal to <see cref="ExpressionType.UnaryPlus"/> and the <see cref="UnaryExpression.Operand"/> property set to the specified value.</returns> /// <exception cref="ArgumentNullException">Thrown when <paramref name="expression"/> is null.</exception> /// <exception cref="InvalidOperationException">Thrown when the unary plus operator is not defined for <paramref name="expression"/>.Type.</exception> public static UnaryExpression UnaryPlus(Expression expression) { return UnaryPlus(expression, method: null); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents a unary plus operation. 
/// </summary> /// <param name="expression">An <see cref="Expression"/> to set the <see cref="UnaryExpression.Operand"/> property equal to.</param> /// <param name="method">A <see cref="MethodInfo"/> to set the <see cref="UnaryExpression.Method"/> property equal to.</param> /// <returns>A <see cref="UnaryExpression"/> that has the <see cref="NodeType"/> property equal to <see cref="ExpressionType.UnaryPlus"/> and the <see cref="UnaryExpression.Operand"/> and <see cref="UnaryExpression.Method"/> properties set to the specified values.</returns> /// <exception cref="ArgumentNullException">Thrown when <paramref name="expression"/> is null.</exception> /// <exception cref="ArgumentException">Thrown when <paramref name="method"/> is not null and the method it represents returns void, is not static (Shared in Visual Basic), or does not take exactly one argument.</exception> /// <exception cref="InvalidOperationException">Thrown when <paramref name="method"/> is null and the unary plus operator is not defined for <paramref name="expression"/>.Type.-or-<paramref name="expression"/>.Type (or its corresponding non-nullable type if it is a nullable value type) is not assignable to the argument type of the method represented by <paramref name="method"/>.</exception> public static UnaryExpression UnaryPlus(Expression expression, MethodInfo? method) { ExpressionUtils.RequiresCanRead(expression, nameof(expression)); if (method == null) { if (expression.Type.IsArithmetic()) { return new UnaryExpression(ExpressionType.UnaryPlus, expression, expression.Type, null); } return GetUserDefinedUnaryOperatorOrThrow(ExpressionType.UnaryPlus, "op_UnaryPlus", expression); } return GetMethodBasedUnaryOperator(ExpressionType.UnaryPlus, expression, method); } /// <summary>Creates a <see cref="UnaryExpression"/> that represents an arithmetic negation operation that has overflow checking.</summary> /// <returns>A <see cref="UnaryExpression"/> that has the <see cref="NodeType"/> property equal to <see cref="ExpressionType.NegateChecked"/> and the <see cref="UnaryExpression.Operand"/> property set to the specified value.</returns> /// <param name="expression">An <see cref="Expression"/> to set the <see cref="UnaryExpression.Operand"/> property equal to.</param> /// <exception cref="ArgumentNullException">Thrown when <paramref name="expression"/> is null.</exception> /// <exception cref="InvalidOperationException">Thrown when the unary minus operator is not defined for <paramref name="expression"/>.Type.</exception> public static UnaryExpression NegateChecked(Expression expression) { return NegateChecked(expression, method: null); } /// <summary>Creates a <see cref="UnaryExpression"/> that represents an arithmetic negation operation that has overflow checking. 
The implementing method can be specified.</summary> /// <returns>A <see cref="UnaryExpression"/> that has the <see cref="NodeType"/> property equal to <see cref="ExpressionType.NegateChecked"/> and the <see cref="UnaryExpression.Operand"/> and <see cref="UnaryExpression.Method"/> properties set to the specified values.</returns> /// <param name="expression">An <see cref="Expression"/> to set the <see cref="UnaryExpression.Operand"/> property equal to.</param> /// <param name="method">A <see cref="MethodInfo"/> to set the <see cref="UnaryExpression.Method"/> property equal to.</param> /// <exception cref="ArgumentNullException"> /// <paramref name="expression"/> is null.</exception> /// <exception cref="ArgumentException"> /// <paramref name="method"/> is not null and the method it represents returns void, is not static (Shared in Visual Basic), or does not take exactly one argument.</exception> /// <exception cref="InvalidOperationException"> /// <paramref name="method"/> is null and the unary minus operator is not defined for <paramref name="expression"/>.Type.-or-<paramref name="expression"/>.Type (or its corresponding non-nullable type if it is a nullable value type) is not assignable to the argument type of the method represented by <paramref name="method"/>.</exception> public static UnaryExpression NegateChecked(Expression expression, MethodInfo? method) { ExpressionUtils.RequiresCanRead(expression, nameof(expression)); if (method == null) { if (expression.Type.IsArithmetic() && !expression.Type.IsUnsignedInt()) { return new UnaryExpression(ExpressionType.NegateChecked, expression, expression.Type, null); } return GetUserDefinedUnaryOperatorOrThrow(ExpressionType.NegateChecked, "op_UnaryNegation", expression); } return GetMethodBasedUnaryOperator(ExpressionType.NegateChecked, expression, method); } /// <summary>Creates a <see cref="UnaryExpression"/> that represents a bitwise complement operation.</summary> /// <returns>A <see cref="UnaryExpression"/> that has the <see cref="NodeType"/> property equal to <see cref="ExpressionType.Not"/> and the <see cref="UnaryExpression.Operand"/> property set to the specified value.</returns> /// <param name="expression">An <see cref="Expression"/> to set the <see cref="UnaryExpression.Operand"/> property equal to.</param> /// <exception cref="ArgumentNullException"> /// <paramref name="expression"/> is null.</exception> /// <exception cref="InvalidOperationException">The unary not operator is not defined for <paramref name="expression"/>.Type.</exception> public static UnaryExpression Not(Expression expression) { return Not(expression, method: null); } /// <summary>Creates a <see cref="UnaryExpression"/> that represents a bitwise complement operation. 
The implementing method can be specified.</summary> /// <returns>A <see cref="UnaryExpression"/> that has the <see cref="NodeType"/> property equal to <see cref="ExpressionType.Not"/> and the <see cref="UnaryExpression.Operand"/> and <see cref="UnaryExpression.Method"/> properties set to the specified values.</returns> /// <param name="expression">An <see cref="Expression"/> to set the <see cref="UnaryExpression.Operand"/> property equal to.</param> /// <param name="method">A <see cref="MethodInfo"/> to set the <see cref="UnaryExpression.Method"/> property equal to.</param> /// <exception cref="ArgumentNullException"> /// <paramref name="expression"/> is null.</exception> /// <exception cref="ArgumentException"> /// <paramref name="method"/> is not null and the method it represents returns void, is not static (Shared in Visual Basic), or does not take exactly one argument.</exception> /// <exception cref="InvalidOperationException"> /// <paramref name="method"/> is null and the unary not operator is not defined for <paramref name="expression"/>.Type.-or-<paramref name="expression"/>.Type (or its corresponding non-nullable type if it is a nullable value type) is not assignable to the argument type of the method represented by <paramref name="method"/>.</exception> public static UnaryExpression Not(Expression expression, MethodInfo? method) { ExpressionUtils.RequiresCanRead(expression, nameof(expression)); if (method == null) { if (expression.Type.IsIntegerOrBool()) { return new UnaryExpression(ExpressionType.Not, expression, expression.Type, null); } UnaryExpression? u = GetUserDefinedUnaryOperator(ExpressionType.Not, "op_LogicalNot", expression); if (u != null) { return u; } return GetUserDefinedUnaryOperatorOrThrow(ExpressionType.Not, "op_OnesComplement", expression); } return GetMethodBasedUnaryOperator(ExpressionType.Not, expression, method); } /// <summary> /// Returns whether the expression evaluates to false. /// </summary> /// <param name="expression">An <see cref="Expression"/> to evaluate.</param> /// <returns>An instance of <see cref="UnaryExpression"/>.</returns> public static UnaryExpression IsFalse(Expression expression) { return IsFalse(expression, method: null); } /// <summary> /// Returns whether the expression evaluates to false. /// </summary> /// <param name="expression">An <see cref="Expression"/> to evaluate.</param> /// <param name="method">A <see cref="MethodInfo"/> that represents the implementing method.</param> /// <returns>An instance of <see cref="UnaryExpression"/>.</returns> public static UnaryExpression IsFalse(Expression expression, MethodInfo? method) { ExpressionUtils.RequiresCanRead(expression, nameof(expression)); if (method == null) { if (expression.Type.IsBool()) { return new UnaryExpression(ExpressionType.IsFalse, expression, expression.Type, null); } return GetUserDefinedUnaryOperatorOrThrow(ExpressionType.IsFalse, "op_False", expression); } return GetMethodBasedUnaryOperator(ExpressionType.IsFalse, expression, method); } /// <summary> /// Returns whether the expression evaluates to true. /// </summary> /// <param name="expression">An <see cref="Expression"/> to evaluate.</param> /// <returns>An instance of <see cref="UnaryExpression"/>.</returns> public static UnaryExpression IsTrue(Expression expression) { return IsTrue(expression, method: null); } /// <summary> /// Returns whether the expression evaluates to true. 
/// </summary> /// <param name="expression">An <see cref="Expression"/> to evaluate.</param> /// <param name="method">A <see cref="MethodInfo"/> that represents the implementing method.</param> /// <returns>An instance of <see cref="UnaryExpression"/>.</returns> public static UnaryExpression IsTrue(Expression expression, MethodInfo? method) { ExpressionUtils.RequiresCanRead(expression, nameof(expression)); if (method == null) { if (expression.Type.IsBool()) { return new UnaryExpression(ExpressionType.IsTrue, expression, expression.Type, null); } return GetUserDefinedUnaryOperatorOrThrow(ExpressionType.IsTrue, "op_True", expression); } return GetMethodBasedUnaryOperator(ExpressionType.IsTrue, expression, method); } /// <summary> /// Returns the expression representing the ones complement. /// </summary> /// <param name="expression">An <see cref="Expression"/>.</param> /// <returns>An instance of <see cref="UnaryExpression"/>.</returns> public static UnaryExpression OnesComplement(Expression expression) { return OnesComplement(expression, method: null); } /// <summary> /// Returns the expression representing the ones complement. /// </summary> /// <param name="expression">An <see cref="Expression"/>.</param> /// <param name="method">A <see cref="MethodInfo"/> that represents the implementing method.</param> /// <returns>An instance of <see cref="UnaryExpression"/>.</returns> public static UnaryExpression OnesComplement(Expression expression, MethodInfo? method) { ExpressionUtils.RequiresCanRead(expression, nameof(expression)); if (method == null) { if (expression.Type.IsInteger()) { return new UnaryExpression(ExpressionType.OnesComplement, expression, expression.Type, null); } return GetUserDefinedUnaryOperatorOrThrow(ExpressionType.OnesComplement, "op_OnesComplement", expression); } return GetMethodBasedUnaryOperator(ExpressionType.OnesComplement, expression, method); } /// <summary>Creates a <see cref="UnaryExpression"/> that represents an explicit reference or boxing conversion where null is supplied if the conversion fails.</summary> /// <returns>A <see cref="UnaryExpression"/> that has the <see cref="NodeType"/> property equal to <see cref="ExpressionType.TypeAs"/> and the <see cref="UnaryExpression.Operand"/> and <see cref="Expression.Type"/> properties set to the specified values.</returns> /// <param name="expression">An <see cref="Expression"/> to set the <see cref="UnaryExpression.Operand"/> property equal to.</param> /// <param name="type">A <see cref="System.Type"/> to set the <see cref="Type"/> property equal to.</param> /// <exception cref="ArgumentNullException"> /// <paramref name="expression"/> or <paramref name="type"/> is null.</exception> public static UnaryExpression TypeAs(Expression expression, Type type) { ExpressionUtils.RequiresCanRead(expression, nameof(expression)); ContractUtils.RequiresNotNull(type, nameof(type)); TypeUtils.ValidateType(type, nameof(type)); if (type.IsValueType && !type.IsNullableType()) { throw Error.IncorrectTypeForTypeAs(type, nameof(type)); } return new UnaryExpression(ExpressionType.TypeAs, expression, type, null); } /// <summary>Creates a <see cref="UnaryExpression"/> that represents an explicit unboxing.</summary> /// <param name="expression">An <see cref="Expression"/> to unbox.</param> /// <param name="type">The new <see cref="System.Type"/> of the expression.</param> /// <returns>An instance of <see cref="UnaryExpression"/>.</returns> public static UnaryExpression Unbox(Expression expression, Type type) { 
ExpressionUtils.RequiresCanRead(expression, nameof(expression)); ContractUtils.RequiresNotNull(type, nameof(type)); if (!expression.Type.IsInterface && expression.Type != typeof(object)) { throw Error.InvalidUnboxType(nameof(expression)); } if (!type.IsValueType) throw Error.InvalidUnboxType(nameof(type)); TypeUtils.ValidateType(type, nameof(type)); return new UnaryExpression(ExpressionType.Unbox, expression, type, null); } /// <summary>Creates a <see cref="UnaryExpression"/> that represents a conversion operation.</summary> /// <returns>A <see cref="UnaryExpression"/> that has the <see cref="NodeType"/> property equal to <see cref="ExpressionType.Convert"/> and the <see cref="UnaryExpression.Operand"/> and <see cref="Expression.Type"/> properties set to the specified values.</returns> /// <param name="expression">An <see cref="Expression"/> to set the <see cref="UnaryExpression.Operand"/> property equal to.</param> /// <param name="type">A <see cref="System.Type"/> to set the <see cref="Type"/> property equal to.</param> /// <exception cref="ArgumentNullException"> /// <paramref name="expression"/> or <paramref name="type"/> is null.</exception> /// <exception cref="InvalidOperationException">No conversion operator is defined between <paramref name="expression"/>.Type and <paramref name="type"/>.</exception> public static UnaryExpression Convert(Expression expression, Type type) { return Convert(expression, type, method: null); } /// <summary>Creates a <see cref="UnaryExpression"/> that represents a conversion operation for which the implementing method is specified.</summary> /// <returns>A <see cref="UnaryExpression"/> that has the <see cref="NodeType"/> property equal to <see cref="ExpressionType.Convert"/> and the <see cref="UnaryExpression.Operand"/>, <see cref="Expression.Type"/>, and <see cref="UnaryExpression.Method"/> properties set to the specified values.</returns> /// <param name="expression">An <see cref="Expression"/> to set the <see cref="UnaryExpression.Operand"/> property equal to.</param> /// <param name="type">A <see cref="System.Type"/> to set the <see cref="Type"/> property equal to.</param> /// <param name="method">A <see cref="MethodInfo"/> to set the <see cref="UnaryExpression.Method"/> property equal to.</param> /// <exception cref="ArgumentNullException"> /// <paramref name="expression"/> or <paramref name="type"/> is null.</exception> /// <exception cref="ArgumentException"> /// <paramref name="method"/> is not null and the method it represents returns void, is not static (Shared in Visual Basic), or does not take exactly one argument.</exception> /// <exception cref="AmbiguousMatchException">More than one method that matches the <paramref name="method"/> description was found.</exception> /// <exception cref="InvalidOperationException">No conversion operator is defined between <paramref name="expression"/>.Type and <paramref name="type"/>.-or-<paramref name="expression"/>.Type is not assignable to the argument type of the method represented by <paramref name="method"/>.-or-The return type of the method represented by <paramref name="method"/> is not assignable to <paramref name="type"/>.-or-<paramref name="expression"/>.Type or <paramref name="type"/> is a nullable value type and the corresponding non-nullable value type does not equal the argument type or the return type, respectively, of the method represented by <paramref name="method"/>.</exception> public static UnaryExpression Convert(Expression expression, Type type, MethodInfo? 
method) { ExpressionUtils.RequiresCanRead(expression, nameof(expression)); ContractUtils.RequiresNotNull(type, nameof(type)); TypeUtils.ValidateType(type, nameof(type)); if (method == null) { if (expression.Type.HasIdentityPrimitiveOrNullableConversionTo(type) || expression.Type.HasReferenceConversionTo(type)) { return new UnaryExpression(ExpressionType.Convert, expression, type, null); } return GetUserDefinedCoercionOrThrow(ExpressionType.Convert, expression, type); } return GetMethodBasedCoercionOperator(ExpressionType.Convert, expression, type, method); } /// <summary>Creates a <see cref="UnaryExpression"/> that represents a conversion operation that throws an exception if the target type is overflowed.</summary> /// <returns>A <see cref="UnaryExpression"/> that has the <see cref="NodeType"/> property equal to <see cref="ExpressionType.ConvertChecked"/> and the <see cref="UnaryExpression.Operand"/> and <see cref="Expression.Type"/> properties set to the specified values.</returns> /// <param name="expression">An <see cref="Expression"/> to set the <see cref="UnaryExpression.Operand"/> property equal to.</param> /// <param name="type">A <see cref="System.Type"/> to set the <see cref="Type"/> property equal to.</param> /// <exception cref="ArgumentNullException"> /// <paramref name="expression"/> or <paramref name="type"/> is null.</exception> /// <exception cref="InvalidOperationException">No conversion operator is defined between <paramref name="expression"/>.Type and <paramref name="type"/>.</exception> public static UnaryExpression ConvertChecked(Expression expression, Type type) { return ConvertChecked(expression, type, method: null); } /// <summary>Creates a <see cref="UnaryExpression"/> that represents a conversion operation that throws an exception if the target type is overflowed and for which the implementing method is specified.</summary> /// <returns>A <see cref="UnaryExpression"/> that has the <see cref="NodeType"/> property equal to <see cref="ExpressionType.ConvertChecked"/> and the <see cref="UnaryExpression.Operand"/>, <see cref="Expression.Type"/>, and <see cref="UnaryExpression.Method"/> properties set to the specified values.</returns> /// <param name="expression">An <see cref="Expression"/> to set the <see cref="UnaryExpression.Operand"/> property equal to.</param> /// <param name="type">A <see cref="System.Type"/> to set the <see cref="Type"/> property equal to.</param> /// <param name="method">A <see cref="MethodInfo"/> to set the <see cref="UnaryExpression.Method"/> property equal to.</param> /// <exception cref="ArgumentNullException"> /// <paramref name="expression"/> or <paramref name="type"/> is null.</exception> /// <exception cref="ArgumentException"> /// <paramref name="method"/> is not null and the method it represents returns void, is not static (Shared in Visual Basic), or does not take exactly one argument.</exception> /// <exception cref="AmbiguousMatchException">More than one method that matches the <paramref name="method"/> description was found.</exception> /// <exception cref="InvalidOperationException">No conversion operator is defined between <paramref name="expression"/>.Type and <paramref name="type"/>.-or-<paramref name="expression"/>.Type is not assignable to the argument type of the method represented by <paramref name="method"/>.-or-The return type of the method represented by <paramref name="method"/> is not assignable to <paramref name="type"/>.-or-<paramref name="expression"/>.Type or <paramref name="type"/> is a nullable value type and 
the corresponding non-nullable value type does not equal the argument type or the return type, respectively, of the method represented by <paramref name="method"/>.</exception> public static UnaryExpression ConvertChecked(Expression expression, Type type, MethodInfo? method) { ExpressionUtils.RequiresCanRead(expression, nameof(expression)); ContractUtils.RequiresNotNull(type, nameof(type)); TypeUtils.ValidateType(type, nameof(type)); if (method == null) { if (expression.Type.HasIdentityPrimitiveOrNullableConversionTo(type)) { return new UnaryExpression(ExpressionType.ConvertChecked, expression, type, null); } if (expression.Type.HasReferenceConversionTo(type)) { return new UnaryExpression(ExpressionType.Convert, expression, type, null); } return GetUserDefinedCoercionOrThrow(ExpressionType.ConvertChecked, expression, type); } return GetMethodBasedCoercionOperator(ExpressionType.ConvertChecked, expression, type, method); } /// <summary>Creates a <see cref="UnaryExpression"/> that represents getting the length of a one-dimensional, zero-based array.</summary> /// <returns>A <see cref="UnaryExpression"/> that has the <see cref="NodeType"/> property equal to <see cref="ExpressionType.ArrayLength"/> and the <see cref="UnaryExpression.Operand"/> property equal to <paramref name="array"/>.</returns> /// <param name="array">An <see cref="Expression"/> to set the <see cref="UnaryExpression.Operand"/> property equal to.</param> /// <exception cref="ArgumentNullException"> /// <paramref name="array"/> is null.</exception> /// <exception cref="ArgumentException"> /// <paramref name="array"/>.Type does not represent a single-dimensional, zero-based array type.</exception> public static UnaryExpression ArrayLength(Expression array) { ExpressionUtils.RequiresCanRead(array, nameof(array)); if (!array.Type.IsSZArray) { if (!array.Type.IsArray || !typeof(Array).IsAssignableFrom(array.Type)) { throw Error.ArgumentMustBeArray(nameof(array)); } throw Error.ArgumentMustBeSingleDimensionalArrayType(nameof(array)); } return new UnaryExpression(ExpressionType.ArrayLength, array, typeof(int), null); } /// <summary>Creates a <see cref="UnaryExpression"/> that represents an expression that has a constant value of type <see cref="Expression"/>.</summary> /// <returns>A <see cref="UnaryExpression"/> that has the <see cref="NodeType"/> property equal to <see cref="ExpressionType.Quote"/> and the <see cref="UnaryExpression.Operand"/> property set to the specified value.</returns> /// <param name="expression">An <see cref="Expression"/> to set the <see cref="UnaryExpression.Operand"/> property equal to.</param> /// <exception cref="ArgumentNullException"> /// <paramref name="expression"/> is null.</exception> public static UnaryExpression Quote(Expression expression) { ExpressionUtils.RequiresCanRead(expression, nameof(expression)); LambdaExpression? lambda = expression as LambdaExpression; if (lambda == null) { throw Error.QuotedExpressionMustBeLambda(nameof(expression)); } return new UnaryExpression(ExpressionType.Quote, lambda, lambda.PublicType, null); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents a rethrowing of an exception. /// </summary> /// <returns>A <see cref="UnaryExpression"/> that represents a rethrowing of an exception.</returns> public static UnaryExpression Rethrow() { return Throw(value: null); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents a rethrowing of an exception with a given type. 
/// </summary> /// <param name="type">The new <see cref="Type"/> of the expression.</param> /// <returns>A <see cref="UnaryExpression"/> that represents a rethrowing of an exception.</returns> public static UnaryExpression Rethrow(Type type) { return Throw(null, type); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents a throwing of an exception. /// </summary> /// <param name="value">An <see cref="Expression"/>.</param> /// <returns>A <see cref="UnaryExpression"/> that represents the exception.</returns> public static UnaryExpression Throw(Expression? value) { return Throw(value, typeof(void)); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents a throwing of a value with a given type. /// </summary> /// <param name="value">An <see cref="Expression"/>.</param> /// <param name="type">The new <see cref="Type"/> of the expression.</param> /// <returns>A <see cref="UnaryExpression"/> that represents the exception.</returns> public static UnaryExpression Throw(Expression? value, Type type) { ContractUtils.RequiresNotNull(type, nameof(type)); TypeUtils.ValidateType(type, nameof(type)); if (value != null) { ExpressionUtils.RequiresCanRead(value, nameof(value)); if (value.Type.IsValueType) throw Error.ArgumentMustNotHaveValueType(nameof(value)); } return new UnaryExpression(ExpressionType.Throw, value!, type, null); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents the incrementing of the expression by 1. /// </summary> /// <param name="expression">An <see cref="Expression"/> to increment.</param> /// <returns>A <see cref="UnaryExpression"/> that represents the incremented expression.</returns> public static UnaryExpression Increment(Expression expression) { return Increment(expression, method: null); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents the incrementing of the expression by 1. /// </summary> /// <param name="expression">An <see cref="Expression"/> to increment.</param> /// <param name="method">A <see cref="MethodInfo"/> that represents the implementing method.</param> /// <returns>A <see cref="UnaryExpression"/> that represents the incremented expression.</returns> public static UnaryExpression Increment(Expression expression, MethodInfo? method) { ExpressionUtils.RequiresCanRead(expression, nameof(expression)); if (method == null) { if (expression.Type.IsArithmetic()) { return new UnaryExpression(ExpressionType.Increment, expression, expression.Type, null); } return GetUserDefinedUnaryOperatorOrThrow(ExpressionType.Increment, "op_Increment", expression); } return GetMethodBasedUnaryOperator(ExpressionType.Increment, expression, method); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents the decrementing of the expression by 1. /// </summary> /// <param name="expression">An <see cref="Expression"/> to decrement.</param> /// <returns>A <see cref="UnaryExpression"/> that represents the decremented expression.</returns> public static UnaryExpression Decrement(Expression expression) { return Decrement(expression, method: null); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents the decrementing of the expression by 1. 
/// </summary> /// <param name="expression">An <see cref="Expression"/> to decrement.</param> /// <param name="method">A <see cref="MethodInfo"/> that represents the implementing method.</param> /// <returns>A <see cref="UnaryExpression"/> that represents the decremented expression.</returns> public static UnaryExpression Decrement(Expression expression, MethodInfo? method) { ExpressionUtils.RequiresCanRead(expression, nameof(expression)); if (method == null) { if (expression.Type.IsArithmetic()) { return new UnaryExpression(ExpressionType.Decrement, expression, expression.Type, null); } return GetUserDefinedUnaryOperatorOrThrow(ExpressionType.Decrement, "op_Decrement", expression); } return GetMethodBasedUnaryOperator(ExpressionType.Decrement, expression, method); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that increments the expression by 1 /// and assigns the result back to the expression. /// </summary> /// <param name="expression">An <see cref="Expression"/> to apply the operations on.</param> /// <returns>A <see cref="UnaryExpression"/> that represents the resultant expression.</returns> public static UnaryExpression PreIncrementAssign(Expression expression) { return MakeOpAssignUnary(ExpressionType.PreIncrementAssign, expression, method: null); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that increments the expression by 1 /// and assigns the result back to the expression. /// </summary> /// <param name="expression">An <see cref="Expression"/> to apply the operations on.</param> /// <param name="method">A <see cref="MethodInfo"/> that represents the implementing method.</param> /// <returns>A <see cref="UnaryExpression"/> that represents the resultant expression.</returns> public static UnaryExpression PreIncrementAssign(Expression expression, MethodInfo? method) { return MakeOpAssignUnary(ExpressionType.PreIncrementAssign, expression, method); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that decrements the expression by 1 /// and assigns the result back to the expression. /// </summary> /// <param name="expression">An <see cref="Expression"/> to apply the operations on.</param> /// <returns>A <see cref="UnaryExpression"/> that represents the resultant expression.</returns> public static UnaryExpression PreDecrementAssign(Expression expression) { return MakeOpAssignUnary(ExpressionType.PreDecrementAssign, expression, method: null); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that decrements the expression by 1 /// and assigns the result back to the expression. /// </summary> /// <param name="expression">An <see cref="Expression"/> to apply the operations on.</param> /// <param name="method">A <see cref="MethodInfo"/> that represents the implementing method.</param> /// <returns>A <see cref="UnaryExpression"/> that represents the resultant expression.</returns> public static UnaryExpression PreDecrementAssign(Expression expression, MethodInfo? method) { return MakeOpAssignUnary(ExpressionType.PreDecrementAssign, expression, method); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents the assignment of the expression /// followed by a subsequent increment by 1 of the original expression. 
/// </summary> /// <param name="expression">An <see cref="Expression"/> to apply the operations on.</param> /// <returns>A <see cref="UnaryExpression"/> that represents the resultant expression.</returns> public static UnaryExpression PostIncrementAssign(Expression expression) { return MakeOpAssignUnary(ExpressionType.PostIncrementAssign, expression, method: null); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents the assignment of the expression /// followed by a subsequent increment by 1 of the original expression. /// </summary> /// <param name="expression">An <see cref="Expression"/> to apply the operations on.</param> /// <param name="method">A <see cref="MethodInfo"/> that represents the implementing method.</param> /// <returns>A <see cref="UnaryExpression"/> that represents the resultant expression.</returns> public static UnaryExpression PostIncrementAssign(Expression expression, MethodInfo? method) { return MakeOpAssignUnary(ExpressionType.PostIncrementAssign, expression, method); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents the assignment of the expression /// followed by a subsequent decrement by 1 of the original expression. /// </summary> /// <param name="expression">An <see cref="Expression"/> to apply the operations on.</param> /// <returns>A <see cref="UnaryExpression"/> that represents the resultant expression.</returns> public static UnaryExpression PostDecrementAssign(Expression expression) { return MakeOpAssignUnary(ExpressionType.PostDecrementAssign, expression, method: null); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents the assignment of the expression /// followed by a subsequent decrement by 1 of the original expression. /// </summary> /// <param name="expression">An <see cref="Expression"/> to apply the operations on.</param> /// <param name="method">A <see cref="MethodInfo"/> that represents the implementing method.</param> /// <returns>A <see cref="UnaryExpression"/> that represents the resultant expression.</returns> public static UnaryExpression PostDecrementAssign(Expression expression, MethodInfo? method) { return MakeOpAssignUnary(ExpressionType.PostDecrementAssign, expression, method); } private static UnaryExpression MakeOpAssignUnary(ExpressionType kind, Expression expression, MethodInfo? method) { ExpressionUtils.RequiresCanRead(expression, nameof(expression)); RequiresCanWrite(expression, nameof(expression)); UnaryExpression result; if (method == null) { if (expression.Type.IsArithmetic()) { return new UnaryExpression(kind, expression, expression.Type, null); } string name; if (kind == ExpressionType.PreIncrementAssign || kind == ExpressionType.PostIncrementAssign) { name = "op_Increment"; } else { Debug.Assert(kind == ExpressionType.PreDecrementAssign || kind == ExpressionType.PostDecrementAssign); name = "op_Decrement"; } result = GetUserDefinedUnaryOperatorOrThrow(kind, name, expression); } else { result = GetMethodBasedUnaryOperator(kind, expression, method); } // return type must be assignable back to the operand type if (!TypeUtils.AreReferenceAssignable(expression.Type, result.Type)) { throw Error.UserDefinedOpMustHaveValidReturnType(kind, method!.Name); } return result; } } }
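A minimal usage sketch follows, showing how the unary factory methods above compose and how a reducible node such as PostIncrementAssign reduces. It is illustrative only and not part of UnaryExpression.cs; the sample class name, Main wrapper, and variable names are assumptions, and only public Expression APIs defined in this file (Negate, Convert, PostIncrementAssign, CanReduce, Reduce) plus the standard Parameter/Lambda/Compile entry points are used.

using System;
using System.Linq.Expressions;

// Hypothetical sample, not part of the runtime sources.
internal static class UnaryExpressionSample
{
    private static void Main()
    {
        ParameterExpression x = Expression.Parameter(typeof(int), "x");

        // Negate: int is arithmetic and not unsigned, so no user-defined operator lookup happens.
        UnaryExpression negated = Expression.Negate(x);

        // Convert: int -> long is a primitive conversion, so no MethodInfo is required.
        UnaryExpression widened = Expression.Convert(negated, typeof(long));

        Func<int, long> negateAndWiden = Expression.Lambda<Func<int, long>>(widened, x).Compile();
        Console.WriteLine(negateAndWiden(5)); // -5

        // PostIncrementAssign is reducible; for a ParameterExpression operand, Reduce()
        // yields the temp/assign block built by UnaryExpression.ReduceVariable above.
        UnaryExpression postIncrement = Expression.PostIncrementAssign(x);
        Console.WriteLine(postIncrement.CanReduce);         // True
        Console.WriteLine(postIncrement.Reduce().NodeType); // Block
    }
}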
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Dynamic.Utils; using System.Reflection; using System.Runtime.CompilerServices; namespace System.Linq.Expressions { /// <summary> /// Represents an expression that has a unary operator. /// </summary> [DebuggerTypeProxy(typeof(UnaryExpressionProxy))] public sealed class UnaryExpression : Expression { internal UnaryExpression(ExpressionType nodeType, Expression expression, Type type, MethodInfo? method) { Operand = expression; Method = method; NodeType = nodeType; Type = type; } /// <summary> /// Gets the static type of the expression that this <see cref="Expression"/> represents. (Inherited from <see cref="Expression"/>.) /// </summary> /// <returns>The <see cref="System.Type"/> that represents the static type of the expression.</returns> public sealed override Type Type { get; } /// <summary> /// Returns the node type of this <see cref="Expression"/>. (Inherited from <see cref="Expression"/>.) /// </summary> /// <returns>The <see cref="ExpressionType"/> that represents this expression.</returns> public sealed override ExpressionType NodeType { get; } /// <summary> /// Gets the operand of the unary operation. /// </summary> /// <returns> An <see cref="Expression"/> that represents the operand of the unary operation. Returns null if node type is <see cref="ExpressionType.Throw"/> with no operand.</returns> public Expression Operand { get; } /// <summary> /// Gets the implementing method for the unary operation. /// </summary> /// <returns>The <see cref="MethodInfo"/> that represents the implementing method.</returns> public MethodInfo? Method { get; } /// <summary> /// Gets a value that indicates whether the expression tree node represents a lifted call to an operator. /// </summary> /// <returns>true if the node represents a lifted call; otherwise, false.</returns> public bool IsLifted { get { if (NodeType == ExpressionType.TypeAs || NodeType == ExpressionType.Quote || NodeType == ExpressionType.Throw) { return false; } bool operandIsNullable = Operand.Type.IsNullableType(); bool resultIsNullable = this.Type.IsNullableType(); if (Method != null) { return (operandIsNullable && !TypeUtils.AreEquivalent(Method.GetParametersCached()[0].ParameterType, Operand.Type)) || (resultIsNullable && !TypeUtils.AreEquivalent(Method.ReturnType, this.Type)); } return operandIsNullable || resultIsNullable; } } /// <summary> /// Gets a value that indicates whether the expression tree node represents a lifted call to an operator whose return type is lifted to a nullable type. /// </summary> /// <returns>true if the operator's return type is lifted to a nullable type; otherwise, false.</returns> public bool IsLiftedToNull => IsLifted && this.Type.IsNullableType(); /// <summary> /// Dispatches to the specific visit method for this node type. /// </summary> protected internal override Expression Accept(ExpressionVisitor visitor) { return visitor.VisitUnary(this); } /// <summary> /// Gets a value that indicates whether the expression tree node can be reduced. /// </summary> public override bool CanReduce { get { switch (NodeType) { case ExpressionType.PreIncrementAssign: case ExpressionType.PreDecrementAssign: case ExpressionType.PostIncrementAssign: case ExpressionType.PostDecrementAssign: return true; } return false; } } /// <summary> /// Reduces the expression node to a simpler expression. 
/// If CanReduce returns true, this should return a valid expression. /// This method is allowed to return another node which itself /// must be reduced. /// </summary> /// <returns>The reduced expression.</returns> public override Expression Reduce() { if (CanReduce) { switch (Operand.NodeType) { case ExpressionType.Index: return ReduceIndex(); case ExpressionType.MemberAccess: return ReduceMember(); default: Debug.Assert(Operand.NodeType == ExpressionType.Parameter); return ReduceVariable(); } } return this; } private bool IsPrefix { get { return NodeType == ExpressionType.PreIncrementAssign || NodeType == ExpressionType.PreDecrementAssign; } } private UnaryExpression FunctionalOp(Expression operand) { ExpressionType functional; if (NodeType == ExpressionType.PreIncrementAssign || NodeType == ExpressionType.PostIncrementAssign) { functional = ExpressionType.Increment; } else { Debug.Assert(NodeType == ExpressionType.PreDecrementAssign || NodeType == ExpressionType.PostDecrementAssign); functional = ExpressionType.Decrement; } return new UnaryExpression(functional, operand, operand.Type, Method); } private Expression ReduceVariable() { if (IsPrefix) { // (op) var // ... is reduced into ... // var = op(var) return Assign(Operand, FunctionalOp(Operand)); } // var (op) // ... is reduced into ... // temp = var // var = op(var) // temp ParameterExpression temp = Parameter(Operand.Type, name: null); return Block( new TrueReadOnlyCollection<ParameterExpression>(temp), new TrueReadOnlyCollection<Expression>( Assign(temp, Operand), Assign(Operand, FunctionalOp(temp)), temp ) ); } private Expression ReduceMember() { var member = (MemberExpression)Operand; if (member.Expression == null) { //static member, reduce the same as variable return ReduceVariable(); } else { ParameterExpression temp1 = Parameter(member.Expression.Type, name: null); BinaryExpression initTemp1 = Assign(temp1, member.Expression); member = MakeMemberAccess(temp1, member.Member); if (IsPrefix) { // (op) value.member // ... is reduced into ... // temp1 = value // temp1.member = op(temp1.member) return Block( new TrueReadOnlyCollection<ParameterExpression>(temp1), new TrueReadOnlyCollection<Expression>( initTemp1, Assign(member, FunctionalOp(member)) ) ); } // value.member (op) // ... is reduced into ... // temp1 = value // temp2 = temp1.member // temp1.member = op(temp2) // temp2 ParameterExpression temp2 = Parameter(member.Type, name: null); return Block( new TrueReadOnlyCollection<ParameterExpression>(temp1, temp2), new TrueReadOnlyCollection<Expression>( initTemp1, Assign(temp2, member), Assign(member, FunctionalOp(temp2)), temp2 ) ); } } private Expression ReduceIndex() { // left[a0, a1, ... aN] (op) // // ... is reduced into ... // // tempObj = left // tempArg0 = a0 // ... // tempArgN = aN // tempValue = tempObj[tempArg0, ... tempArgN] // tempObj[tempArg0, ... tempArgN] = op(tempValue) // tempValue bool prefix = IsPrefix; var index = (IndexExpression)Operand; int count = index.ArgumentCount; var block = new Expression[count + (prefix ? 2 : 4)]; var temps = new ParameterExpression[count + (prefix ? 
1 : 2)]; var args = new ParameterExpression[count]; int i = 0; temps[i] = Parameter(index.Object!.Type, name: null); block[i] = Assign(temps[i], index.Object); i++; while (i <= count) { Expression arg = index.GetArgument(i - 1); args[i - 1] = temps[i] = Parameter(arg.Type, name: null); block[i] = Assign(temps[i], arg); i++; } index = MakeIndex(temps[0], index.Indexer, new TrueReadOnlyCollection<Expression>(args)); if (!prefix) { ParameterExpression lastTemp = temps[i] = Parameter(index.Type, name: null); block[i] = Assign(temps[i], index); i++; Debug.Assert(i == temps.Length); block[i++] = Assign(index, FunctionalOp(lastTemp)); block[i++] = lastTemp; } else { Debug.Assert(i == temps.Length); block[i++] = Assign(index, FunctionalOp(index)); } Debug.Assert(i == block.Length); return Block(new TrueReadOnlyCollection<ParameterExpression>(temps), new TrueReadOnlyCollection<Expression>(block)); } /// <summary> /// Creates a new expression that is like this one, but using the /// supplied children. If all of the children are the same, it will /// return this expression. /// </summary> /// <param name="operand">The <see cref="Operand"/> property of the result.</param> /// <returns>This expression if no children changed, or an expression with the updated children.</returns> public UnaryExpression Update(Expression operand) { if (operand == Operand) { return this; } return Expression.MakeUnary(NodeType, operand, Type, Method); } } public partial class Expression { /// <summary> /// Creates a <see cref="UnaryExpression"/>, given an operand, by calling the appropriate factory method. /// </summary> /// <param name="unaryType">The <see cref="ExpressionType"/> that specifies the type of unary operation.</param> /// <param name="operand">An <see cref="Expression"/> that represents the operand.</param> /// <param name="type">The <see cref="Type"/> that specifies the type to be converted to (pass null if not applicable).</param> /// <returns>The <see cref="UnaryExpression"/> that results from calling the appropriate factory method.</returns> /// <exception cref="ArgumentException">Thrown when <paramref name="unaryType"/> does not correspond to a unary expression.</exception> /// <exception cref="ArgumentNullException">Thrown when <paramref name="operand"/> is null.</exception> public static UnaryExpression MakeUnary(ExpressionType unaryType, Expression operand, Type type) { return MakeUnary(unaryType, operand, type, method: null); } /// <summary> /// Creates a <see cref="UnaryExpression"/>, given an operand and implementing method, by calling the appropriate factory method. /// </summary> /// <param name="unaryType">The <see cref="ExpressionType"/> that specifies the type of unary operation.</param> /// <param name="operand">An <see cref="Expression"/> that represents the operand.</param> /// <param name="type">The <see cref="Type"/> that specifies the type to be converted to (pass null if not applicable).</param> /// <param name="method">The <see cref="MethodInfo"/> that represents the implementing method.</param> /// <returns>The <see cref="UnaryExpression"/> that results from calling the appropriate factory method.</returns> /// <exception cref="ArgumentException">Thrown when <paramref name="unaryType"/> does not correspond to a unary expression.</exception> /// <exception cref="ArgumentNullException">Thrown when <paramref name="operand"/> is null.</exception> public static UnaryExpression MakeUnary(ExpressionType unaryType, Expression operand, Type type, MethodInfo? 
method) => unaryType switch { ExpressionType.Negate => Negate(operand, method), ExpressionType.NegateChecked => NegateChecked(operand, method), ExpressionType.Not => Not(operand, method), ExpressionType.IsFalse => IsFalse(operand, method), ExpressionType.IsTrue => IsTrue(operand, method), ExpressionType.OnesComplement => OnesComplement(operand, method), ExpressionType.ArrayLength => ArrayLength(operand), ExpressionType.Convert => Convert(operand, type, method), ExpressionType.ConvertChecked => ConvertChecked(operand, type, method), ExpressionType.Throw => Throw(operand, type), ExpressionType.TypeAs => TypeAs(operand, type), ExpressionType.Quote => Quote(operand), ExpressionType.UnaryPlus => UnaryPlus(operand, method), ExpressionType.Unbox => Unbox(operand, type), ExpressionType.Increment => Increment(operand, method), ExpressionType.Decrement => Decrement(operand, method), ExpressionType.PreIncrementAssign => PreIncrementAssign(operand, method), ExpressionType.PostIncrementAssign => PostIncrementAssign(operand, method), ExpressionType.PreDecrementAssign => PreDecrementAssign(operand, method), ExpressionType.PostDecrementAssign => PostDecrementAssign(operand, method), _ => throw Error.UnhandledUnary(unaryType, nameof(unaryType)), }; private static UnaryExpression GetUserDefinedUnaryOperatorOrThrow(ExpressionType unaryType, string name, Expression operand) { UnaryExpression? u = GetUserDefinedUnaryOperator(unaryType, name, operand); if (u != null) { ValidateParamswithOperandsOrThrow(u.Method!.GetParametersCached()[0].ParameterType, operand.Type, unaryType, name); return u; } throw Error.UnaryOperatorNotDefined(unaryType, operand.Type); } [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2072:UnrecognizedReflectionPattern", Justification = "The trimmer doesn't remove operators when System.Linq.Expressions is used. See https://github.com/mono/linker/pull/2125.")] private static UnaryExpression? GetUserDefinedUnaryOperator(ExpressionType unaryType, string name, Expression operand) { Type operandType = operand.Type; Type[] types = new Type[] { operandType }; Type nnOperandType = operandType.GetNonNullableType(); MethodInfo? 
method = nnOperandType.GetAnyStaticMethodValidated(name, types); if (method != null) { return new UnaryExpression(unaryType, operand, method.ReturnType, method); } // try lifted call if (operandType.IsNullableType()) { types[0] = nnOperandType; method = nnOperandType.GetAnyStaticMethodValidated(name, types); if (method != null && method.ReturnType.IsValueType && !method.ReturnType.IsNullableType()) { return new UnaryExpression(unaryType, operand, method.ReturnType.GetNullableType(), method); } } return null; } private static UnaryExpression GetMethodBasedUnaryOperator(ExpressionType unaryType, Expression operand, MethodInfo method) { Debug.Assert(method != null); ValidateOperator(method); ParameterInfo[] pms = method.GetParametersCached(); if (pms.Length != 1) throw Error.IncorrectNumberOfMethodCallArguments(method, nameof(method)); if (ParameterIsAssignable(pms[0], operand.Type)) { ValidateParamswithOperandsOrThrow(pms[0].ParameterType, operand.Type, unaryType, method.Name); return new UnaryExpression(unaryType, operand, method.ReturnType, method); } // check for lifted call if (operand.Type.IsNullableType() && ParameterIsAssignable(pms[0], operand.Type.GetNonNullableType()) && method.ReturnType.IsValueType && !method.ReturnType.IsNullableType()) { return new UnaryExpression(unaryType, operand, method.ReturnType.GetNullableType(), method); } throw Error.OperandTypesDoNotMatchParameters(unaryType, method.Name); } private static UnaryExpression GetUserDefinedCoercionOrThrow(ExpressionType coercionType, Expression expression, Type convertToType) { UnaryExpression? u = GetUserDefinedCoercion(coercionType, expression, convertToType); if (u != null) { return u; } throw Error.CoercionOperatorNotDefined(expression.Type, convertToType); } private static UnaryExpression? GetUserDefinedCoercion(ExpressionType coercionType, Expression expression, Type convertToType) { MethodInfo? method = TypeUtils.GetUserDefinedCoercionMethod(expression.Type, convertToType); if (method != null) { return new UnaryExpression(coercionType, expression, convertToType, method); } else { return null; } } private static UnaryExpression GetMethodBasedCoercionOperator(ExpressionType unaryType, Expression operand, Type convertToType, MethodInfo method) { Debug.Assert(method != null); ValidateOperator(method); ParameterInfo[] pms = method.GetParametersCached(); if (pms.Length != 1) { throw Error.IncorrectNumberOfMethodCallArguments(method, nameof(method)); } if (ParameterIsAssignable(pms[0], operand.Type) && TypeUtils.AreEquivalent(method.ReturnType, convertToType)) { return new UnaryExpression(unaryType, operand, method.ReturnType, method); } // check for lifted call if ((operand.Type.IsNullableType() || convertToType.IsNullableType()) && ParameterIsAssignable(pms[0], operand.Type.GetNonNullableType()) && (TypeUtils.AreEquivalent(method.ReturnType, convertToType.GetNonNullableType()) || TypeUtils.AreEquivalent(method.ReturnType, convertToType))) { return new UnaryExpression(unaryType, operand, convertToType, method); } throw Error.OperandTypesDoNotMatchParameters(unaryType, method.Name); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents an arithmetic negation operation. 
/// </summary> /// <param name="expression">An <see cref="Expression"/> to set the <see cref="UnaryExpression.Operand"/> property equal to.</param> /// <returns>A <see cref="UnaryExpression"/> that has the <see cref="NodeType"/> property equal to <see cref="ExpressionType.Negate"/> and the <see cref="UnaryExpression.Operand"/> properties set to the specified value.</returns> /// <exception cref="ArgumentNullException">Thrown when <paramref name="expression"/> is null.</exception> /// <exception cref="InvalidOperationException">Thrown when the unary minus operator is not defined for <paramref name="expression"/>.Type.</exception> public static UnaryExpression Negate(Expression expression) { return Negate(expression, method: null); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents an arithmetic negation operation. /// </summary> /// <param name="expression">An <see cref="Expression"/> to set the <see cref="UnaryExpression.Operand"/> property equal to.</param> /// <param name="method">A <see cref="MethodInfo"/> to set the <see cref="UnaryExpression.Method"/> property equal to.</param> /// <returns>A <see cref="UnaryExpression"/> that has the <see cref="NodeType"/> property equal to <see cref="ExpressionType.Negate"/> and the <see cref="UnaryExpression.Operand"/> and <see cref="UnaryExpression.Method"/> properties set to the specified value.</returns> /// <exception cref="ArgumentNullException">Thrown when <paramref name="expression"/> is null.</exception> /// <exception cref="ArgumentException">Thrown when <paramref name="method"/> is not null and the method it represents returns void, is not static (Shared in Visual Basic), or does not take exactly one argument.</exception> /// <exception cref="InvalidOperationException">Thrown when <paramref name="method"/> is null and the unary minus operator is not defined for <paramref name="expression"/>.Type (or its corresponding non-nullable type if it is a nullable value type) is not assignable to the argument type of the method represented by method.</exception> public static UnaryExpression Negate(Expression expression, MethodInfo? method) { ExpressionUtils.RequiresCanRead(expression, nameof(expression)); if (method == null) { if (expression.Type.IsArithmetic() && !expression.Type.IsUnsignedInt()) { return new UnaryExpression(ExpressionType.Negate, expression, expression.Type, null); } return GetUserDefinedUnaryOperatorOrThrow(ExpressionType.Negate, "op_UnaryNegation", expression); } return GetMethodBasedUnaryOperator(ExpressionType.Negate, expression, method); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents a unary plus operation. /// </summary> /// <param name="expression">An <see cref="Expression"/> to set the <see cref="UnaryExpression.Operand"/> property equal to.</param> /// <returns>A <see cref="UnaryExpression"/> that has the <see cref="NodeType"/> property equal to <see cref="ExpressionType.UnaryPlus"/> and the <see cref="UnaryExpression.Operand"/> property set to the specified value.</returns> /// <exception cref="ArgumentNullException">Thrown when <paramref name="expression"/> is null.</exception> /// <exception cref="InvalidOperationException">Thrown when the unary minus operator is not defined for <paramref name="expression"/>.Type.</exception> public static UnaryExpression UnaryPlus(Expression expression) { return UnaryPlus(expression, method: null); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents a unary plus operation. 
/// </summary> /// <param name="expression">An <see cref="Expression"/> to set the <see cref="UnaryExpression.Operand"/> property equal to.</param> /// <param name="method">A <see cref="MethodInfo"/> to set the <see cref="UnaryExpression.Method"/> property equal to.</param> /// <returns>A <see cref="UnaryExpression"/> that has the <see cref="NodeType"/> property equal to <see cref="ExpressionType.UnaryPlus"/> and the <see cref="UnaryExpression.Operand"/> and <see cref="UnaryExpression.Method"/>property set to the specified value.</returns> /// <exception cref="ArgumentNullException">Thrown when <paramref name="expression"/> is null.</exception> /// <exception cref="ArgumentException">Thrown when <paramref name="method"/> is not null and the method it represents returns void, is not static (Shared in Visual Basic), or does not take exactly one argument.</exception> /// <exception cref="InvalidOperationException">Thrown when <paramref name="method"/> is null and the unary minus operator is not defined for <paramref name="expression"/>.Type (or its corresponding non-nullable type if it is a nullable value type) is not assignable to the argument type of the method represented by method.</exception> public static UnaryExpression UnaryPlus(Expression expression, MethodInfo? method) { ExpressionUtils.RequiresCanRead(expression, nameof(expression)); if (method == null) { if (expression.Type.IsArithmetic()) { return new UnaryExpression(ExpressionType.UnaryPlus, expression, expression.Type, null); } return GetUserDefinedUnaryOperatorOrThrow(ExpressionType.UnaryPlus, "op_UnaryPlus", expression); } return GetMethodBasedUnaryOperator(ExpressionType.UnaryPlus, expression, method); } /// <summary>Creates a <see cref="UnaryExpression"/> that represents an arithmetic negation operation that has overflow checking.</summary> /// <returns>A <see cref="UnaryExpression"/> that has the <see cref="NodeType"/> property equal to <see cref="ExpressionType.NegateChecked"/> and the <see cref="UnaryExpression.Operand"/> property set to the specified value.</returns> /// <param name="expression">An <see cref="Expression"/> to set the <see cref="UnaryExpression.Operand"/> property equal to.</param> /// <exception cref="ArgumentNullException">Thrown when <paramref name="expression"/> is null.</exception> /// <exception cref="InvalidOperationException">Thrown when the unary minus operator is not defined for <paramref name="expression"/>.Type.</exception> public static UnaryExpression NegateChecked(Expression expression) { return NegateChecked(expression, method: null); } /// <summary>Creates a <see cref="UnaryExpression"/> that represents an arithmetic negation operation that has overflow checking. 
The implementing method can be specified.</summary> /// <returns>A <see cref="UnaryExpression"/> that has the <see cref="NodeType"/> property equal to <see cref="ExpressionType.NegateChecked"/> and the <see cref="UnaryExpression.Operand"/> and <see cref="UnaryExpression.Method"/> properties set to the specified values.</returns> /// <param name="expression">An <see cref="Expression"/> to set the <see cref="UnaryExpression.Operand"/> property equal to.</param> /// <param name="method">A <see cref="MethodInfo"/> to set the <see cref="UnaryExpression.Method"/> property equal to.</param> /// <exception cref="ArgumentNullException"> /// <paramref name="expression"/> is null.</exception> /// <exception cref="ArgumentException"> /// <paramref name="method"/> is not null and the method it represents returns void, is not static (Shared in Visual Basic), or does not take exactly one argument.</exception> /// <exception cref="InvalidOperationException"> /// <paramref name="method"/> is null and the unary minus operator is not defined for <paramref name="expression"/>.Type.-or-<paramref name="expression"/>.Type (or its corresponding non-nullable type if it is a nullable value type) is not assignable to the argument type of the method represented by <paramref name="method"/>.</exception> public static UnaryExpression NegateChecked(Expression expression, MethodInfo? method) { ExpressionUtils.RequiresCanRead(expression, nameof(expression)); if (method == null) { if (expression.Type.IsArithmetic() && !expression.Type.IsUnsignedInt()) { return new UnaryExpression(ExpressionType.NegateChecked, expression, expression.Type, null); } return GetUserDefinedUnaryOperatorOrThrow(ExpressionType.NegateChecked, "op_UnaryNegation", expression); } return GetMethodBasedUnaryOperator(ExpressionType.NegateChecked, expression, method); } /// <summary>Creates a <see cref="UnaryExpression"/> that represents a bitwise complement operation.</summary> /// <returns>A <see cref="UnaryExpression"/> that has the <see cref="NodeType"/> property equal to <see cref="ExpressionType.Not"/> and the <see cref="UnaryExpression.Operand"/> property set to the specified value.</returns> /// <param name="expression">An <see cref="Expression"/> to set the <see cref="UnaryExpression.Operand"/> property equal to.</param> /// <exception cref="ArgumentNullException"> /// <paramref name="expression"/> is null.</exception> /// <exception cref="InvalidOperationException">The unary not operator is not defined for <paramref name="expression"/>.Type.</exception> public static UnaryExpression Not(Expression expression) { return Not(expression, method: null); } /// <summary>Creates a <see cref="UnaryExpression"/> that represents a bitwise complement operation. 
The implementing method can be specified.</summary> /// <returns>A <see cref="UnaryExpression"/> that has the <see cref="NodeType"/> property equal to <see cref="ExpressionType.Not"/> and the <see cref="UnaryExpression.Operand"/> and <see cref="UnaryExpression.Method"/> properties set to the specified values.</returns> /// <param name="expression">An <see cref="Expression"/> to set the <see cref="UnaryExpression.Operand"/> property equal to.</param> /// <param name="method">A <see cref="MethodInfo"/> to set the <see cref="UnaryExpression.Method"/> property equal to.</param> /// <exception cref="ArgumentNullException"> /// <paramref name="expression"/> is null.</exception> /// <exception cref="ArgumentException"> /// <paramref name="method"/> is not null and the method it represents returns void, is not static (Shared in Visual Basic), or does not take exactly one argument.</exception> /// <exception cref="InvalidOperationException"> /// <paramref name="method"/> is null and the unary not operator is not defined for <paramref name="expression"/>.Type.-or-<paramref name="expression"/>.Type (or its corresponding non-nullable type if it is a nullable value type) is not assignable to the argument type of the method represented by <paramref name="method"/>.</exception> public static UnaryExpression Not(Expression expression, MethodInfo? method) { ExpressionUtils.RequiresCanRead(expression, nameof(expression)); if (method == null) { if (expression.Type.IsIntegerOrBool()) { return new UnaryExpression(ExpressionType.Not, expression, expression.Type, null); } UnaryExpression? u = GetUserDefinedUnaryOperator(ExpressionType.Not, "op_LogicalNot", expression); if (u != null) { return u; } return GetUserDefinedUnaryOperatorOrThrow(ExpressionType.Not, "op_OnesComplement", expression); } return GetMethodBasedUnaryOperator(ExpressionType.Not, expression, method); } /// <summary> /// Returns whether the expression evaluates to false. /// </summary> /// <param name="expression">An <see cref="Expression"/> to evaluate.</param> /// <returns>An instance of <see cref="UnaryExpression"/>.</returns> public static UnaryExpression IsFalse(Expression expression) { return IsFalse(expression, method: null); } /// <summary> /// Returns whether the expression evaluates to false. /// </summary> /// <param name="expression">An <see cref="Expression"/> to evaluate.</param> /// <param name="method">A <see cref="MethodInfo"/> that represents the implementing method.</param> /// <returns>An instance of <see cref="UnaryExpression"/>.</returns> public static UnaryExpression IsFalse(Expression expression, MethodInfo? method) { ExpressionUtils.RequiresCanRead(expression, nameof(expression)); if (method == null) { if (expression.Type.IsBool()) { return new UnaryExpression(ExpressionType.IsFalse, expression, expression.Type, null); } return GetUserDefinedUnaryOperatorOrThrow(ExpressionType.IsFalse, "op_False", expression); } return GetMethodBasedUnaryOperator(ExpressionType.IsFalse, expression, method); } /// <summary> /// Returns whether the expression evaluates to true. /// </summary> /// <param name="expression">An <see cref="Expression"/> to evaluate.</param> /// <returns>An instance of <see cref="UnaryExpression"/>.</returns> public static UnaryExpression IsTrue(Expression expression) { return IsTrue(expression, method: null); } /// <summary> /// Returns whether the expression evaluates to true. 
/// </summary> /// <param name="expression">An <see cref="Expression"/> to evaluate.</param> /// <param name="method">A <see cref="MethodInfo"/> that represents the implementing method.</param> /// <returns>An instance of <see cref="UnaryExpression"/>.</returns> public static UnaryExpression IsTrue(Expression expression, MethodInfo? method) { ExpressionUtils.RequiresCanRead(expression, nameof(expression)); if (method == null) { if (expression.Type.IsBool()) { return new UnaryExpression(ExpressionType.IsTrue, expression, expression.Type, null); } return GetUserDefinedUnaryOperatorOrThrow(ExpressionType.IsTrue, "op_True", expression); } return GetMethodBasedUnaryOperator(ExpressionType.IsTrue, expression, method); } /// <summary> /// Returns the expression representing the ones complement. /// </summary> /// <param name="expression">An <see cref="Expression"/>.</param> /// <returns>An instance of <see cref="UnaryExpression"/>.</returns> public static UnaryExpression OnesComplement(Expression expression) { return OnesComplement(expression, method: null); } /// <summary> /// Returns the expression representing the ones complement. /// </summary> /// <param name="expression">An <see cref="Expression"/>.</param> /// <param name="method">A <see cref="MethodInfo"/> that represents the implementing method.</param> /// <returns>An instance of <see cref="UnaryExpression"/>.</returns> public static UnaryExpression OnesComplement(Expression expression, MethodInfo? method) { ExpressionUtils.RequiresCanRead(expression, nameof(expression)); if (method == null) { if (expression.Type.IsInteger()) { return new UnaryExpression(ExpressionType.OnesComplement, expression, expression.Type, null); } return GetUserDefinedUnaryOperatorOrThrow(ExpressionType.OnesComplement, "op_OnesComplement", expression); } return GetMethodBasedUnaryOperator(ExpressionType.OnesComplement, expression, method); } /// <summary>Creates a <see cref="UnaryExpression"/> that represents an explicit reference or boxing conversion where null is supplied if the conversion fails.</summary> /// <returns>A <see cref="UnaryExpression"/> that has the <see cref="NodeType"/> property equal to <see cref="ExpressionType.TypeAs"/> and the <see cref="UnaryExpression.Operand"/> and <see cref="Expression.Type"/> properties set to the specified values.</returns> /// <param name="expression">An <see cref="Expression"/> to set the <see cref="UnaryExpression.Operand"/> property equal to.</param> /// <param name="type">A <see cref="System.Type"/> to set the <see cref="Type"/> property equal to.</param> /// <exception cref="ArgumentNullException"> /// <paramref name="expression"/> or <paramref name="type"/> is null.</exception> public static UnaryExpression TypeAs(Expression expression, Type type) { ExpressionUtils.RequiresCanRead(expression, nameof(expression)); ContractUtils.RequiresNotNull(type, nameof(type)); TypeUtils.ValidateType(type, nameof(type)); if (type.IsValueType && !type.IsNullableType()) { throw Error.IncorrectTypeForTypeAs(type, nameof(type)); } return new UnaryExpression(ExpressionType.TypeAs, expression, type, null); } /// <summary> /// <summary>Creates a <see cref="UnaryExpression"/> that represents an explicit unboxing.</summary> /// </summary> /// <param name="expression">An <see cref="Expression"/> to unbox.</param> /// <param name="type">The new <see cref="System.Type"/> of the expression.</param> /// <returns>An instance of <see cref="UnaryExpression"/>.</returns> public static UnaryExpression Unbox(Expression expression, Type type) { 
ExpressionUtils.RequiresCanRead(expression, nameof(expression)); ContractUtils.RequiresNotNull(type, nameof(type)); if (!expression.Type.IsInterface && expression.Type != typeof(object)) { throw Error.InvalidUnboxType(nameof(expression)); } if (!type.IsValueType) throw Error.InvalidUnboxType(nameof(type)); TypeUtils.ValidateType(type, nameof(type)); return new UnaryExpression(ExpressionType.Unbox, expression, type, null); } /// <summary>Creates a <see cref="UnaryExpression"/> that represents a conversion operation.</summary> /// <returns>A <see cref="UnaryExpression"/> that has the <see cref="NodeType"/> property equal to <see cref="ExpressionType.Convert"/> and the <see cref="UnaryExpression.Operand"/> and <see cref="Expression.Type"/> properties set to the specified values.</returns> /// <param name="expression">An <see cref="Expression"/> to set the <see cref="UnaryExpression.Operand"/> property equal to.</param> /// <param name="type">A <see cref="System.Type"/> to set the <see cref="Type"/> property equal to.</param> /// <exception cref="ArgumentNullException"> /// <paramref name="expression"/> or <paramref name="type"/> is null.</exception> /// <exception cref="InvalidOperationException">No conversion operator is defined between <paramref name="expression"/>.Type and <paramref name="type"/>.</exception> public static UnaryExpression Convert(Expression expression, Type type) { return Convert(expression, type, method: null); } /// <summary>Creates a <see cref="UnaryExpression"/> that represents a conversion operation for which the implementing method is specified.</summary> /// <returns>A <see cref="UnaryExpression"/> that has the <see cref="NodeType"/> property equal to <see cref="ExpressionType.Convert"/> and the <see cref="UnaryExpression.Operand"/>, <see cref="Expression.Type"/>, and <see cref="UnaryExpression.Method"/> properties set to the specified values.</returns> /// <param name="expression">An <see cref="Expression"/> to set the <see cref="UnaryExpression.Operand"/> property equal to.</param> /// <param name="type">A <see cref="System.Type"/> to set the <see cref="Type"/> property equal to.</param> /// <param name="method">A <see cref="MethodInfo"/> to set the <see cref="UnaryExpression.Method"/> property equal to.</param> /// <exception cref="ArgumentNullException"> /// <paramref name="expression"/> or <paramref name="type"/> is null.</exception> /// <exception cref="ArgumentException"> /// <paramref name="method"/> is not null and the method it represents returns void, is not static (Shared in Visual Basic), or does not take exactly one argument.</exception> /// <exception cref="AmbiguousMatchException">More than one method that matches the <paramref name="method"/> description was found.</exception> /// <exception cref="InvalidOperationException">No conversion operator is defined between <paramref name="expression"/>.Type and <paramref name="type"/>.-or-<paramref name="expression"/>.Type is not assignable to the argument type of the method represented by <paramref name="method"/>.-or-The return type of the method represented by <paramref name="method"/> is not assignable to <paramref name="type"/>.-or-<paramref name="expression"/>.Type or <paramref name="type"/> is a nullable value type and the corresponding non-nullable value type does not equal the argument type or the return type, respectively, of the method represented by <paramref name="method"/>.</exception> public static UnaryExpression Convert(Expression expression, Type type, MethodInfo? 
method) { ExpressionUtils.RequiresCanRead(expression, nameof(expression)); ContractUtils.RequiresNotNull(type, nameof(type)); TypeUtils.ValidateType(type, nameof(type)); if (method == null) { if (expression.Type.HasIdentityPrimitiveOrNullableConversionTo(type) || expression.Type.HasReferenceConversionTo(type)) { return new UnaryExpression(ExpressionType.Convert, expression, type, null); } return GetUserDefinedCoercionOrThrow(ExpressionType.Convert, expression, type); } return GetMethodBasedCoercionOperator(ExpressionType.Convert, expression, type, method); } /// <summary>Creates a <see cref="UnaryExpression"/> that represents a conversion operation that throws an exception if the target type is overflowed.</summary> /// <returns>A <see cref="UnaryExpression"/> that has the <see cref="NodeType"/> property equal to <see cref="ExpressionType.ConvertChecked"/> and the <see cref="UnaryExpression.Operand"/> and <see cref="Expression.Type"/> properties set to the specified values.</returns> /// <param name="expression">An <see cref="Expression"/> to set the <see cref="UnaryExpression.Operand"/> property equal to.</param> /// <param name="type">A <see cref="System.Type"/> to set the <see cref="Type"/> property equal to.</param> /// <exception cref="ArgumentNullException"> /// <paramref name="expression"/> or <paramref name="type"/> is null.</exception> /// <exception cref="InvalidOperationException">No conversion operator is defined between <paramref name="expression"/>.Type and <paramref name="type"/>.</exception> public static UnaryExpression ConvertChecked(Expression expression, Type type) { return ConvertChecked(expression, type, method: null); } /// <summary>Creates a <see cref="UnaryExpression"/> that represents a conversion operation that throws an exception if the target type is overflowed and for which the implementing method is specified.</summary> /// <returns>A <see cref="UnaryExpression"/> that has the <see cref="NodeType"/> property equal to <see cref="ExpressionType.ConvertChecked"/> and the <see cref="UnaryExpression.Operand"/>, <see cref="Expression.Type"/>, and <see cref="UnaryExpression.Method"/> properties set to the specified values.</returns> /// <param name="expression">An <see cref="Expression"/> to set the <see cref="UnaryExpression.Operand"/> property equal to.</param> /// <param name="type">A <see cref="System.Type"/> to set the <see cref="Type"/> property equal to.</param> /// <param name="method">A <see cref="MethodInfo"/> to set the <see cref="UnaryExpression.Method"/> property equal to.</param> /// <exception cref="ArgumentNullException"> /// <paramref name="expression"/> or <paramref name="type"/> is null.</exception> /// <exception cref="ArgumentException"> /// <paramref name="method"/> is not null and the method it represents returns void, is not static (Shared in Visual Basic), or does not take exactly one argument.</exception> /// <exception cref="AmbiguousMatchException">More than one method that matches the <paramref name="method"/> description was found.</exception> /// <exception cref="InvalidOperationException">No conversion operator is defined between <paramref name="expression"/>.Type and <paramref name="type"/>.-or-<paramref name="expression"/>.Type is not assignable to the argument type of the method represented by <paramref name="method"/>.-or-The return type of the method represented by <paramref name="method"/> is not assignable to <paramref name="type"/>.-or-<paramref name="expression"/>.Type or <paramref name="type"/> is a nullable value type and 
the corresponding non-nullable value type does not equal the argument type or the return type, respectively, of the method represented by <paramref name="method"/>.</exception> public static UnaryExpression ConvertChecked(Expression expression, Type type, MethodInfo? method) { ExpressionUtils.RequiresCanRead(expression, nameof(expression)); ContractUtils.RequiresNotNull(type, nameof(type)); TypeUtils.ValidateType(type, nameof(type)); if (method == null) { if (expression.Type.HasIdentityPrimitiveOrNullableConversionTo(type)) { return new UnaryExpression(ExpressionType.ConvertChecked, expression, type, null); } if (expression.Type.HasReferenceConversionTo(type)) { return new UnaryExpression(ExpressionType.Convert, expression, type, null); } return GetUserDefinedCoercionOrThrow(ExpressionType.ConvertChecked, expression, type); } return GetMethodBasedCoercionOperator(ExpressionType.ConvertChecked, expression, type, method); } /// <summary>Creates a <see cref="UnaryExpression"/> that represents getting the length of a one-dimensional, zero-based array.</summary> /// <returns>A <see cref="UnaryExpression"/> that has the <see cref="NodeType"/> property equal to <see cref="ExpressionType.ArrayLength"/> and the <see cref="UnaryExpression.Operand"/> property equal to <paramref name="array"/>.</returns> /// <param name="array">An <see cref="Expression"/> to set the <see cref="UnaryExpression.Operand"/> property equal to.</param> /// <exception cref="ArgumentNullException"> /// <paramref name="array"/> is null.</exception> /// <exception cref="ArgumentException"> /// <paramref name="array"/>.Type does not represent a single-dimensional, zero-based array type.</exception> public static UnaryExpression ArrayLength(Expression array) { ExpressionUtils.RequiresCanRead(array, nameof(array)); if (!array.Type.IsSZArray) { if (!array.Type.IsArray || !typeof(Array).IsAssignableFrom(array.Type)) { throw Error.ArgumentMustBeArray(nameof(array)); } throw Error.ArgumentMustBeSingleDimensionalArrayType(nameof(array)); } return new UnaryExpression(ExpressionType.ArrayLength, array, typeof(int), null); } /// <summary>Creates a <see cref="UnaryExpression"/> that represents an expression that has a constant value of type <see cref="Expression"/>.</summary> /// <returns>A <see cref="UnaryExpression"/> that has the <see cref="NodeType"/> property equal to <see cref="ExpressionType.Quote"/> and the <see cref="UnaryExpression.Operand"/> property set to the specified value.</returns> /// <param name="expression">An <see cref="Expression"/> to set the <see cref="UnaryExpression.Operand"/> property equal to.</param> /// <exception cref="ArgumentNullException"> /// <paramref name="expression"/> is null.</exception> public static UnaryExpression Quote(Expression expression) { ExpressionUtils.RequiresCanRead(expression, nameof(expression)); LambdaExpression? lambda = expression as LambdaExpression; if (lambda == null) { throw Error.QuotedExpressionMustBeLambda(nameof(expression)); } return new UnaryExpression(ExpressionType.Quote, lambda, lambda.PublicType, null); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents a rethrowing of an exception. /// </summary> /// <returns>A <see cref="UnaryExpression"/> that represents a rethrowing of an exception.</returns> public static UnaryExpression Rethrow() { return Throw(value: null); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents a rethrowing of an exception with a given type. 
/// </summary> /// <param name="type">The new <see cref="Type"/> of the expression.</param> /// <returns>A <see cref="UnaryExpression"/> that represents a rethrowing of an exception.</returns> public static UnaryExpression Rethrow(Type type) { return Throw(null, type); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents a throwing of an exception. /// </summary> /// <param name="value">An <see cref="Expression"/>.</param> /// <returns>A <see cref="UnaryExpression"/> that represents the exception.</returns> public static UnaryExpression Throw(Expression? value) { return Throw(value, typeof(void)); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents a throwing of a value with a given type. /// </summary> /// <param name="value">An <see cref="Expression"/>.</param> /// <param name="type">The new <see cref="Type"/> of the expression.</param> /// <returns>A <see cref="UnaryExpression"/> that represents the exception.</returns> public static UnaryExpression Throw(Expression? value, Type type) { ContractUtils.RequiresNotNull(type, nameof(type)); TypeUtils.ValidateType(type, nameof(type)); if (value != null) { ExpressionUtils.RequiresCanRead(value, nameof(value)); if (value.Type.IsValueType) throw Error.ArgumentMustNotHaveValueType(nameof(value)); } return new UnaryExpression(ExpressionType.Throw, value!, type, null); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents the incrementing of the expression by 1. /// </summary> /// <param name="expression">An <see cref="Expression"/> to increment.</param> /// <returns>A <see cref="UnaryExpression"/> that represents the incremented expression.</returns> public static UnaryExpression Increment(Expression expression) { return Increment(expression, method: null); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents the incrementing of the expression by 1. /// </summary> /// <param name="expression">An <see cref="Expression"/> to increment.</param> /// <param name="method">A <see cref="MethodInfo"/> that represents the implementing method.</param> /// <returns>A <see cref="UnaryExpression"/> that represents the incremented expression.</returns> public static UnaryExpression Increment(Expression expression, MethodInfo? method) { ExpressionUtils.RequiresCanRead(expression, nameof(expression)); if (method == null) { if (expression.Type.IsArithmetic()) { return new UnaryExpression(ExpressionType.Increment, expression, expression.Type, null); } return GetUserDefinedUnaryOperatorOrThrow(ExpressionType.Increment, "op_Increment", expression); } return GetMethodBasedUnaryOperator(ExpressionType.Increment, expression, method); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents the decrementing of the expression by 1. /// </summary> /// <param name="expression">An <see cref="Expression"/> to decrement.</param> /// <returns>A <see cref="UnaryExpression"/> that represents the decremented expression.</returns> public static UnaryExpression Decrement(Expression expression) { return Decrement(expression, method: null); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents the decrementing of the expression by 1. 
/// </summary> /// <param name="expression">An <see cref="Expression"/> to decrement.</param> /// <param name="method">A <see cref="MethodInfo"/> that represents the implementing method.</param> /// <returns>A <see cref="UnaryExpression"/> that represents the decremented expression.</returns> public static UnaryExpression Decrement(Expression expression, MethodInfo? method) { ExpressionUtils.RequiresCanRead(expression, nameof(expression)); if (method == null) { if (expression.Type.IsArithmetic()) { return new UnaryExpression(ExpressionType.Decrement, expression, expression.Type, null); } return GetUserDefinedUnaryOperatorOrThrow(ExpressionType.Decrement, "op_Decrement", expression); } return GetMethodBasedUnaryOperator(ExpressionType.Decrement, expression, method); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that increments the expression by 1 /// and assigns the result back to the expression. /// </summary> /// <param name="expression">An <see cref="Expression"/> to apply the operations on.</param> /// <returns>A <see cref="UnaryExpression"/> that represents the resultant expression.</returns> public static UnaryExpression PreIncrementAssign(Expression expression) { return MakeOpAssignUnary(ExpressionType.PreIncrementAssign, expression, method: null); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that increments the expression by 1 /// and assigns the result back to the expression. /// </summary> /// <param name="expression">An <see cref="Expression"/> to apply the operations on.</param> /// <param name="method">A <see cref="MethodInfo"/> that represents the implementing method.</param> /// <returns>A <see cref="UnaryExpression"/> that represents the resultant expression.</returns> public static UnaryExpression PreIncrementAssign(Expression expression, MethodInfo? method) { return MakeOpAssignUnary(ExpressionType.PreIncrementAssign, expression, method); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that decrements the expression by 1 /// and assigns the result back to the expression. /// </summary> /// <param name="expression">An <see cref="Expression"/> to apply the operations on.</param> /// <returns>A <see cref="UnaryExpression"/> that represents the resultant expression.</returns> public static UnaryExpression PreDecrementAssign(Expression expression) { return MakeOpAssignUnary(ExpressionType.PreDecrementAssign, expression, method: null); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that decrements the expression by 1 /// and assigns the result back to the expression. /// </summary> /// <param name="expression">An <see cref="Expression"/> to apply the operations on.</param> /// <param name="method">A <see cref="MethodInfo"/> that represents the implementing method.</param> /// <returns>A <see cref="UnaryExpression"/> that represents the resultant expression.</returns> public static UnaryExpression PreDecrementAssign(Expression expression, MethodInfo? method) { return MakeOpAssignUnary(ExpressionType.PreDecrementAssign, expression, method); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents the assignment of the expression /// followed by a subsequent increment by 1 of the original expression. 
/// </summary> /// <param name="expression">An <see cref="Expression"/> to apply the operations on.</param> /// <returns>A <see cref="UnaryExpression"/> that represents the resultant expression.</returns> public static UnaryExpression PostIncrementAssign(Expression expression) { return MakeOpAssignUnary(ExpressionType.PostIncrementAssign, expression, method: null); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents the assignment of the expression /// followed by a subsequent increment by 1 of the original expression. /// </summary> /// <param name="expression">An <see cref="Expression"/> to apply the operations on.</param> /// <param name="method">A <see cref="MethodInfo"/> that represents the implementing method.</param> /// <returns>A <see cref="UnaryExpression"/> that represents the resultant expression.</returns> public static UnaryExpression PostIncrementAssign(Expression expression, MethodInfo? method) { return MakeOpAssignUnary(ExpressionType.PostIncrementAssign, expression, method); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents the assignment of the expression /// followed by a subsequent decrement by 1 of the original expression. /// </summary> /// <param name="expression">An <see cref="Expression"/> to apply the operations on.</param> /// <returns>A <see cref="UnaryExpression"/> that represents the resultant expression.</returns> public static UnaryExpression PostDecrementAssign(Expression expression) { return MakeOpAssignUnary(ExpressionType.PostDecrementAssign, expression, method: null); } /// <summary> /// Creates a <see cref="UnaryExpression"/> that represents the assignment of the expression /// followed by a subsequent decrement by 1 of the original expression. /// </summary> /// <param name="expression">An <see cref="Expression"/> to apply the operations on.</param> /// <param name="method">A <see cref="MethodInfo"/> that represents the implementing method.</param> /// <returns>A <see cref="UnaryExpression"/> that represents the resultant expression.</returns> public static UnaryExpression PostDecrementAssign(Expression expression, MethodInfo? method) { return MakeOpAssignUnary(ExpressionType.PostDecrementAssign, expression, method); } private static UnaryExpression MakeOpAssignUnary(ExpressionType kind, Expression expression, MethodInfo? method) { ExpressionUtils.RequiresCanRead(expression, nameof(expression)); RequiresCanWrite(expression, nameof(expression)); UnaryExpression result; if (method == null) { if (expression.Type.IsArithmetic()) { return new UnaryExpression(kind, expression, expression.Type, null); } string name; if (kind == ExpressionType.PreIncrementAssign || kind == ExpressionType.PostIncrementAssign) { name = "op_Increment"; } else { Debug.Assert(kind == ExpressionType.PreDecrementAssign || kind == ExpressionType.PostDecrementAssign); name = "op_Decrement"; } result = GetUserDefinedUnaryOperatorOrThrow(kind, name, expression); } else { result = GetMethodBasedUnaryOperator(kind, expression, method); } // return type must be assignable back to the operand type if (!TypeUtils.AreReferenceAssignable(expression.Type, result.Type)) { throw Error.UserDefinedOpMustHaveValidReturnType(kind, method!.Name); } return result; } } }
-1
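The row that ends just above (label -1) carries the UnaryExpression factory methods of System.Linq.Expressions (Negate, NegateChecked, Not, Convert, ConvertChecked, Throw, the pre/post increment and decrement assign helpers, and so on). As a quick orientation, a minimal sketch of how these factories are typically consumed follows; the Demo class, parameter names, and printed values are illustrative assumptions, not part of the dataset row.

using System;
using System.Linq.Expressions;

class Demo
{
    static void Main()
    {
        // A parameter expression of type int, used as the operand below.
        ParameterExpression x = Expression.Parameter(typeof(int), "x");

        // Built via the factories defined above: arithmetic negation and a widening conversion.
        UnaryExpression negate = Expression.Negate(x);                 // -x
        UnaryExpression widen  = Expression.Convert(x, typeof(long));  // (long)x

        // Wrap each node in a lambda and compile it to a delegate.
        Func<int, int>  negateFunc = Expression.Lambda<Func<int, int>>(negate, x).Compile();
        Func<int, long> widenFunc  = Expression.Lambda<Func<int, long>>(widen, x).Compile();

        Console.WriteLine(negateFunc(5)); // -5
        Console.WriteLine(widenFunc(5));  // 5, now as a long
    }
}

For operand types with no built-in operator, the same factories fall back to the user-defined operator lookup shown in the code above (for example op_UnaryNegation for Negate, op_LogicalNot and then op_OnesComplement for Not), including the lifted nullable forms.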
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/libraries/Microsoft.VisualBasic.Core/tests/Microsoft/VisualBasic/HideModuleNameAttributeTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using Xunit; namespace Microsoft.VisualBasic.Tests { public class HideModuleNameAttributeTests { [Fact] public void Ctor_Empty_Success() { new HideModuleNameAttribute(); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using Xunit; namespace Microsoft.VisualBasic.Tests { public class HideModuleNameAttributeTests { [Fact] public void Ctor_Empty_Success() { new HideModuleNameAttribute(); } } }
-1
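The "Cloning improvements" description repeated in these rows concerns RyuJIT loop cloning: the loop variable's initialization no longer has to be a constant or a simple local for the loop to be a cloning candidate. A hypothetical C# loop of the shape the description calls out is sketched below; the method name, the values, and the comment about the generated code are illustrative, not taken from the PR.

using System;

public static class CloningDemo
{
    public static int Sum(int[] a, int offset)
    {
        int sum = 0;
        // The induction variable starts from a computed expression (offset + 1),
        // not a constant or a plain local copy: the "for (i = expression..." case
        // the description says is now eligible for cloning. Cloning lets the JIT
        // emit a fast path without array bounds checks, guarded by runtime checks
        // on the loop limits.
        for (int i = offset + 1; i < a.Length; i++)
        {
            sum += a[i];
        }
        return sum;
    }

    public static void Main() => Console.WriteLine(Sum(new[] { 1, 2, 3, 4 }, 0)); // prints 9
}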
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/coreclr/pal/src/libunwind/tests/Ltest-nocalloc.c
/* libunwind - a platform-independent unwind library Copyright (C) 2011 Google, Inc Contributed by Paul Pluzhnikov <[email protected]> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #define UNW_LOCAL_ONLY #include <libunwind.h> #include <unistd.h> #include <stdio.h> #include <stdlib.h> #include <dlfcn.h> #include <pthread.h> #define panic(args...) \ { fprintf (stderr, args); exit (-1); } int num_mallocs; int num_callocs; int in_unwind; void * calloc(size_t n, size_t s) { static void * (*func)(size_t, size_t); #ifdef __GLIBC__ /* In glibc, dlsym() calls calloc. Calling dlsym(RTLD_NEXT, "calloc") here causes infinite recursion. Instead, we simply use it by its other name. */ extern void *__libc_calloc(size_t, size_t); if (!func) func = &__libc_calloc; #else if(!func) func = dlsym(RTLD_NEXT, "calloc"); #endif if (in_unwind) { num_callocs++; return NULL; } else { return func(n, s); } } void * malloc(size_t s) { static void * (*func)(size_t); if(!func) func = dlsym(RTLD_NEXT, "malloc"); if (in_unwind) { num_mallocs++; return NULL; } else { return func(s); } } static void do_backtrace (void) { const int num_levels = 100; void *pc[num_levels]; in_unwind = 1; unw_backtrace(pc, num_levels); in_unwind = 0; } void foo3 (void) { do_backtrace (); } void foo2 (void) { foo3 (); } void foo1 (void) { foo2 (); } int main (void) { int i, num_errors; /* Create (and leak) 100 TSDs, then call backtrace() and check that it doesn't call malloc()/calloc(). */ for (i = 0; i < 100; ++i) { pthread_key_t key; if (pthread_key_create (&key, NULL)) panic ("FAILURE: unable to create key %d\n", i); } /* Call backtrace right after thread creation, * where we are sure that we're not inside malloc */ do_backtrace(); num_mallocs = num_callocs = 0; foo1 (); num_errors = num_mallocs + num_callocs; if (num_errors > 0) { fprintf (stderr, "FAILURE: detected %d error%s (malloc: %d, calloc: %d)\n", num_errors, num_errors > 1 ? "s" : "", num_mallocs, num_callocs); exit (-1); } return 0; }
/* libunwind - a platform-independent unwind library Copyright (C) 2011 Google, Inc Contributed by Paul Pluzhnikov <[email protected]> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #define UNW_LOCAL_ONLY #include <libunwind.h> #include <unistd.h> #include <stdio.h> #include <stdlib.h> #include <dlfcn.h> #include <pthread.h> #define panic(args...) \ { fprintf (stderr, args); exit (-1); } int num_mallocs; int num_callocs; int in_unwind; void * calloc(size_t n, size_t s) { static void * (*func)(size_t, size_t); #ifdef __GLIBC__ /* In glibc, dlsym() calls calloc. Calling dlsym(RTLD_NEXT, "calloc") here causes infinite recursion. Instead, we simply use it by its other name. */ extern void *__libc_calloc(size_t, size_t); if (!func) func = &__libc_calloc; #else if(!func) func = dlsym(RTLD_NEXT, "calloc"); #endif if (in_unwind) { num_callocs++; return NULL; } else { return func(n, s); } } void * malloc(size_t s) { static void * (*func)(size_t); if(!func) func = dlsym(RTLD_NEXT, "malloc"); if (in_unwind) { num_mallocs++; return NULL; } else { return func(s); } } static void do_backtrace (void) { const int num_levels = 100; void *pc[num_levels]; in_unwind = 1; unw_backtrace(pc, num_levels); in_unwind = 0; } void foo3 (void) { do_backtrace (); } void foo2 (void) { foo3 (); } void foo1 (void) { foo2 (); } int main (void) { int i, num_errors; /* Create (and leak) 100 TSDs, then call backtrace() and check that it doesn't call malloc()/calloc(). */ for (i = 0; i < 100; ++i) { pthread_key_t key; if (pthread_key_create (&key, NULL)) panic ("FAILURE: unable to create key %d\n", i); } /* Call backtrace right after thread creation, * where we are sure that we're not inside malloc */ do_backtrace(); num_mallocs = num_callocs = 0; foo1 (); num_errors = num_mallocs + num_callocs; if (num_errors > 0) { fprintf (stderr, "FAILURE: detected %d error%s (malloc: %d, calloc: %d)\n", num_errors, num_errors > 1 ? "s" : "", num_mallocs, num_callocs); exit (-1); } return 0; }
-1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/tests/baseservices/threading/commitstackonlyasneeded/DefaultStackCommit.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <AllowUnsafeBlocks>true</AllowUnsafeBlocks> <CLRTestPriority>1</CLRTestPriority> <!-- Test unsupported outside of windows --> <CLRTestTargetUnsupported Condition="'$(TargetsWindows)' != 'true'">true</CLRTestTargetUnsupported> </PropertyGroup> <ItemGroup> <Compile Include="DefaultStackCommit.cs" /> <Compile Include="StackCommitCommon.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <AllowUnsafeBlocks>true</AllowUnsafeBlocks> <CLRTestPriority>1</CLRTestPriority> <!-- Test unsupported outside of windows --> <CLRTestTargetUnsupported Condition="'$(TargetsWindows)' != 'true'">true</CLRTestTargetUnsupported> </PropertyGroup> <ItemGroup> <Compile Include="DefaultStackCommit.cs" /> <Compile Include="StackCommitCommon.cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/tests/JIT/Methodical/nonvirtualcall/tailcall.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.IO; using System.Runtime.CompilerServices; class ApplicationException : Exception { public ApplicationException(string message) : base(message) { } } namespace Test { public abstract class Base { public abstract string AbstractFinal(); public abstract string AbstractOverrideFinal(); public abstract string AbstractOverrideOverride(); public abstract string AbstractOverrideNil(); public virtual string VirtualFinal() { return "Base.VirtualFinal"; } public virtual string VirtualNilFinal() { return "Base.VirtualNilFinal"; } public virtual string VirtualOverrideFinal() { return "Base.VirtualOverrideFinal"; } public virtual string VirtualNilOverride() { return "Base.VirtualNilOverride"; } public virtual string VirtualNilNil() { return "Base.VirtualNilNil"; } public virtual string VirtualOverrideOverride() { return "Base.VirtualOverrideOverride"; } public virtual string VirtualOverrideNil() { return "Base.VirtualOverrideNil"; } } public class Child : Base { public sealed override string AbstractFinal() { return "Child.AbstractFinal"; } public string CallAbstractFinal() { return AbstractFinal(); } public override string AbstractOverrideFinal() { return "Child.AbstractOverrideFinal"; } public override string AbstractOverrideOverride() { return "Child.AbstractOverrideOverride"; } public override string AbstractOverrideNil() { return "Child.AbstractOverrideNil"; } public string CallAbstractOverrideNil() { return AbstractOverrideNil(); } public sealed override string VirtualFinal() { return "Child.VirtualFinal"; } public string CallVirtualFinal() { return VirtualFinal(); } public override string VirtualOverrideFinal() { return "Child.VirtualOverrideFinal"; } public override string VirtualOverrideOverride() { return "Child.VirtualOverrideOverride"; } public override string VirtualOverrideNil() { return "Child.VirtualOverrideNil"; } public string CallVirtualOverrideNil() { return VirtualOverrideNil(); } } public class GrandChild : Child { public sealed override string AbstractOverrideFinal() { return "GrandChild.AbstractOverrideFinal"; } public string CallAbstractOverrideFinal() { return AbstractOverrideFinal(); } public override string AbstractOverrideOverride() { return "GrandChild.AbstractOverrideOverride"; } public string CallAbstractOverrideOverride() { return AbstractOverrideOverride(); } public sealed override string VirtualNilFinal() { return "GrandChild.VirtualNilFinal"; } public string CallVirtualNilFinal() { return VirtualNilFinal(); } public sealed override string VirtualOverrideFinal() { return "GrandChild.VirtualOverrideFinal"; } public string CallVirtualOverrideFinal() { return VirtualOverrideFinal(); } public override string VirtualOverrideOverride() { return "GrandChild.VirtualOverrideOverride"; } public string CallVirtualOverrideOverride() { return VirtualOverrideOverride(); } public override string VirtualNilOverride() { return "GrandChild.VirtualNilOverride"; } public string CallVirtualNilOverride() { return VirtualNilOverride(); } public void TestGrandChild() { Console.WriteLine("Call from inside GrandChild"); Assert.AreEqual("Child.AbstractFinal", CallAbstractFinal()); Assert.AreEqual("GrandChild.AbstractOverrideFinal", CallAbstractOverrideFinal()); Assert.AreEqual("GrandChild.AbstractOverrideOverride", CallAbstractOverrideOverride()); Assert.AreEqual("Child.AbstractOverrideNil", CallAbstractOverrideNil()); 
Assert.AreEqual("Child.VirtualFinal", CallVirtualFinal()); Assert.AreEqual("GrandChild.VirtualOverrideFinal", CallVirtualOverrideFinal()); Assert.AreEqual("GrandChild.VirtualOverrideOverride", CallVirtualOverrideOverride()); Assert.AreEqual("Child.VirtualOverrideNil", CallVirtualOverrideNil()); } } public static class Program { public static void CallFromInsideGrandChild() { GrandChild child = new GrandChild(); child.TestGrandChild(); } public static int Main() { try { CallFromInsideGrandChild(); Console.WriteLine("Test SUCCESS"); return 100; } catch (Exception ex) { Console.WriteLine(ex); Console.WriteLine("Test FAILED"); return 101; } } } public static class Assert { public static void AreEqual(string left, string right) { if (String.IsNullOrEmpty(left)) throw new ArgumentNullException("left"); if (string.IsNullOrEmpty(right)) throw new ArgumentNullException("right"); if (left != right) { string message = String.Format("[[{0}]] != [[{1}]]", left, right); throw new ApplicationException(message); } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.IO; using System.Runtime.CompilerServices; class ApplicationException : Exception { public ApplicationException(string message) : base(message) { } } namespace Test { public abstract class Base { public abstract string AbstractFinal(); public abstract string AbstractOverrideFinal(); public abstract string AbstractOverrideOverride(); public abstract string AbstractOverrideNil(); public virtual string VirtualFinal() { return "Base.VirtualFinal"; } public virtual string VirtualNilFinal() { return "Base.VirtualNilFinal"; } public virtual string VirtualOverrideFinal() { return "Base.VirtualOverrideFinal"; } public virtual string VirtualNilOverride() { return "Base.VirtualNilOverride"; } public virtual string VirtualNilNil() { return "Base.VirtualNilNil"; } public virtual string VirtualOverrideOverride() { return "Base.VirtualOverrideOverride"; } public virtual string VirtualOverrideNil() { return "Base.VirtualOverrideNil"; } } public class Child : Base { public sealed override string AbstractFinal() { return "Child.AbstractFinal"; } public string CallAbstractFinal() { return AbstractFinal(); } public override string AbstractOverrideFinal() { return "Child.AbstractOverrideFinal"; } public override string AbstractOverrideOverride() { return "Child.AbstractOverrideOverride"; } public override string AbstractOverrideNil() { return "Child.AbstractOverrideNil"; } public string CallAbstractOverrideNil() { return AbstractOverrideNil(); } public sealed override string VirtualFinal() { return "Child.VirtualFinal"; } public string CallVirtualFinal() { return VirtualFinal(); } public override string VirtualOverrideFinal() { return "Child.VirtualOverrideFinal"; } public override string VirtualOverrideOverride() { return "Child.VirtualOverrideOverride"; } public override string VirtualOverrideNil() { return "Child.VirtualOverrideNil"; } public string CallVirtualOverrideNil() { return VirtualOverrideNil(); } } public class GrandChild : Child { public sealed override string AbstractOverrideFinal() { return "GrandChild.AbstractOverrideFinal"; } public string CallAbstractOverrideFinal() { return AbstractOverrideFinal(); } public override string AbstractOverrideOverride() { return "GrandChild.AbstractOverrideOverride"; } public string CallAbstractOverrideOverride() { return AbstractOverrideOverride(); } public sealed override string VirtualNilFinal() { return "GrandChild.VirtualNilFinal"; } public string CallVirtualNilFinal() { return VirtualNilFinal(); } public sealed override string VirtualOverrideFinal() { return "GrandChild.VirtualOverrideFinal"; } public string CallVirtualOverrideFinal() { return VirtualOverrideFinal(); } public override string VirtualOverrideOverride() { return "GrandChild.VirtualOverrideOverride"; } public string CallVirtualOverrideOverride() { return VirtualOverrideOverride(); } public override string VirtualNilOverride() { return "GrandChild.VirtualNilOverride"; } public string CallVirtualNilOverride() { return VirtualNilOverride(); } public void TestGrandChild() { Console.WriteLine("Call from inside GrandChild"); Assert.AreEqual("Child.AbstractFinal", CallAbstractFinal()); Assert.AreEqual("GrandChild.AbstractOverrideFinal", CallAbstractOverrideFinal()); Assert.AreEqual("GrandChild.AbstractOverrideOverride", CallAbstractOverrideOverride()); Assert.AreEqual("Child.AbstractOverrideNil", CallAbstractOverrideNil()); 
Assert.AreEqual("Child.VirtualFinal", CallVirtualFinal()); Assert.AreEqual("GrandChild.VirtualOverrideFinal", CallVirtualOverrideFinal()); Assert.AreEqual("GrandChild.VirtualOverrideOverride", CallVirtualOverrideOverride()); Assert.AreEqual("Child.VirtualOverrideNil", CallVirtualOverrideNil()); } } public static class Program { public static void CallFromInsideGrandChild() { GrandChild child = new GrandChild(); child.TestGrandChild(); } public static int Main() { try { CallFromInsideGrandChild(); Console.WriteLine("Test SUCCESS"); return 100; } catch (Exception ex) { Console.WriteLine(ex); Console.WriteLine("Test FAILED"); return 101; } } } public static class Assert { public static void AreEqual(string left, string right) { if (String.IsNullOrEmpty(left)) throw new ArgumentNullException("left"); if (string.IsNullOrEmpty(right)) throw new ArgumentNullException("right"); if (left != right) { string message = String.Format("[[{0}]] != [[{1}]]", left, right); throw new ApplicationException(message); } } } }
-1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/libraries/System.Private.CoreLib/src/System/Number.NumberBuffer.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics; using System.Text; using System.Runtime.CompilerServices; namespace System { internal static partial class Number { // We need 1 additional byte, per length, for the terminating null internal const int DecimalNumberBufferLength = 29 + 1 + 1; // 29 for the longest input + 1 for rounding internal const int DoubleNumberBufferLength = 767 + 1 + 1; // 767 for the longest input + 1 for rounding: 4.9406564584124654E-324 internal const int Int32NumberBufferLength = 10 + 1; // 10 for the longest input: 2,147,483,647 internal const int Int64NumberBufferLength = 19 + 1; // 19 for the longest input: 9,223,372,036,854,775,807 internal const int SingleNumberBufferLength = 112 + 1 + 1; // 112 for the longest input + 1 for rounding: 1.40129846E-45 internal const int HalfNumberBufferLength = 21; // 19 for the longest input + 1 for rounding (+1 for the null terminator) internal const int UInt32NumberBufferLength = 10 + 1; // 10 for the longest input: 4,294,967,295 internal const int UInt64NumberBufferLength = 20 + 1; // 20 for the longest input: 18,446,744,073,709,551,615 internal unsafe ref struct NumberBuffer { public int DigitsCount; public int Scale; public bool IsNegative; public bool HasNonZeroTail; public NumberBufferKind Kind; public Span<byte> Digits; public NumberBuffer(NumberBufferKind kind, byte* digits, int digitsLength) { Debug.Assert(digits != null); Debug.Assert(digitsLength > 0); DigitsCount = 0; Scale = 0; IsNegative = false; HasNonZeroTail = false; Kind = kind; Digits = new Span<byte>(digits, digitsLength); #if DEBUG Digits.Fill(0xCC); #endif Digits[0] = (byte)('\0'); CheckConsistency(); } [Conditional("DEBUG")] public void CheckConsistency() { #if DEBUG Debug.Assert((Kind == NumberBufferKind.Integer) || (Kind == NumberBufferKind.Decimal) || (Kind == NumberBufferKind.FloatingPoint)); Debug.Assert(Digits[0] != '0', "Leading zeros should never be stored in a Number"); int numDigits; for (numDigits = 0; numDigits < Digits.Length; numDigits++) { byte digit = Digits[numDigits]; if (digit == 0) { break; } Debug.Assert((digit >= '0') && (digit <= '9'), "Unexpected character found in Number"); } Debug.Assert(numDigits == DigitsCount, "Null terminator found in unexpected location in Number"); Debug.Assert(numDigits < Digits.Length, "Null terminator not found in Number"); #endif // DEBUG } public byte* GetDigitsPointer() { // This is safe to do since we are a ref struct return (byte*)(Unsafe.AsPointer(ref Digits[0])); } // // Code coverage note: This only exists so that Number displays nicely in the VS watch window. So yes, I know it works. // public override string ToString() { StringBuilder sb = new StringBuilder(); sb.Append('['); sb.Append('"'); for (int i = 0; i < Digits.Length; i++) { byte digit = Digits[i]; if (digit == 0) { break; } sb.Append((char)(digit)); } sb.Append('"'); sb.Append(", Length = ").Append(DigitsCount); sb.Append(", Scale = ").Append(Scale); sb.Append(", IsNegative = ").Append(IsNegative); sb.Append(", HasNonZeroTail = ").Append(HasNonZeroTail); sb.Append(", Kind = ").Append(Kind); sb.Append(']'); return sb.ToString(); } } internal enum NumberBufferKind : byte { Unknown = 0, Integer = 1, Decimal = 2, FloatingPoint = 3, } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics; using System.Text; using System.Runtime.CompilerServices; namespace System { internal static partial class Number { // We need 1 additional byte, per length, for the terminating null internal const int DecimalNumberBufferLength = 29 + 1 + 1; // 29 for the longest input + 1 for rounding internal const int DoubleNumberBufferLength = 767 + 1 + 1; // 767 for the longest input + 1 for rounding: 4.9406564584124654E-324 internal const int Int32NumberBufferLength = 10 + 1; // 10 for the longest input: 2,147,483,647 internal const int Int64NumberBufferLength = 19 + 1; // 19 for the longest input: 9,223,372,036,854,775,807 internal const int SingleNumberBufferLength = 112 + 1 + 1; // 112 for the longest input + 1 for rounding: 1.40129846E-45 internal const int HalfNumberBufferLength = 21; // 19 for the longest input + 1 for rounding (+1 for the null terminator) internal const int UInt32NumberBufferLength = 10 + 1; // 10 for the longest input: 4,294,967,295 internal const int UInt64NumberBufferLength = 20 + 1; // 20 for the longest input: 18,446,744,073,709,551,615 internal unsafe ref struct NumberBuffer { public int DigitsCount; public int Scale; public bool IsNegative; public bool HasNonZeroTail; public NumberBufferKind Kind; public Span<byte> Digits; public NumberBuffer(NumberBufferKind kind, byte* digits, int digitsLength) { Debug.Assert(digits != null); Debug.Assert(digitsLength > 0); DigitsCount = 0; Scale = 0; IsNegative = false; HasNonZeroTail = false; Kind = kind; Digits = new Span<byte>(digits, digitsLength); #if DEBUG Digits.Fill(0xCC); #endif Digits[0] = (byte)('\0'); CheckConsistency(); } [Conditional("DEBUG")] public void CheckConsistency() { #if DEBUG Debug.Assert((Kind == NumberBufferKind.Integer) || (Kind == NumberBufferKind.Decimal) || (Kind == NumberBufferKind.FloatingPoint)); Debug.Assert(Digits[0] != '0', "Leading zeros should never be stored in a Number"); int numDigits; for (numDigits = 0; numDigits < Digits.Length; numDigits++) { byte digit = Digits[numDigits]; if (digit == 0) { break; } Debug.Assert((digit >= '0') && (digit <= '9'), "Unexpected character found in Number"); } Debug.Assert(numDigits == DigitsCount, "Null terminator found in unexpected location in Number"); Debug.Assert(numDigits < Digits.Length, "Null terminator not found in Number"); #endif // DEBUG } public byte* GetDigitsPointer() { // This is safe to do since we are a ref struct return (byte*)(Unsafe.AsPointer(ref Digits[0])); } // // Code coverage note: This only exists so that Number displays nicely in the VS watch window. So yes, I know it works. // public override string ToString() { StringBuilder sb = new StringBuilder(); sb.Append('['); sb.Append('"'); for (int i = 0; i < Digits.Length; i++) { byte digit = Digits[i]; if (digit == 0) { break; } sb.Append((char)(digit)); } sb.Append('"'); sb.Append(", Length = ").Append(DigitsCount); sb.Append(", Scale = ").Append(Scale); sb.Append(", IsNegative = ").Append(IsNegative); sb.Append(", HasNonZeroTail = ").Append(HasNonZeroTail); sb.Append(", Kind = ").Append(Kind); sb.Append(']'); return sb.ToString(); } } internal enum NumberBufferKind : byte { Unknown = 0, Integer = 1, Decimal = 2, FloatingPoint = 3, } } }
-1
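The CheckConsistency routine in the Number.NumberBuffer.cs record above encodes a small invariant over the digit buffer: the first byte is never '0', every byte up to a terminating 0 is an ASCII digit, the terminator lies inside the buffer, and DigitsCount equals the length of the digit run. Below is a minimal standalone sketch of that same check, assuming a plain byte[] in place of the runtime's Span<byte>; the IsConsistent name is illustrative only and not part of the BCL.

using System;

static class DigitBufferSketch
{
    // Same invariant NumberBuffer.CheckConsistency asserts, expressed as a predicate:
    // no leading '0', only ASCII digits before a 0 terminator that sits inside the
    // buffer, and the recorded digit count equal to the length of the digit run.
    static bool IsConsistent(byte[] digits, int digitsCount)
    {
        if (digits.Length == 0 || digits[0] == (byte)'0')
            return false;

        int n = 0;
        while (n < digits.Length && digits[n] != 0)
        {
            if (digits[n] < (byte)'0' || digits[n] > (byte)'9')
                return false;
            n++;
        }

        return n < digits.Length   // terminator found inside the buffer
            && n == digitsCount;   // digit run matches the recorded count
    }

    static void Main()
    {
        Console.WriteLine(IsConsistent(new byte[] { (byte)'1', (byte)'2', (byte)'3', 0 }, 3)); // True
        Console.WriteLine(IsConsistent(new byte[] { (byte)'0', (byte)'1', 0 }, 2));            // False: leading zero
    }
}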
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/libraries/System.Private.Xml/tests/Xslt/TestFiles/TestData/xsltc/baseline/bft27.txt
Microsoft (R) XSLT Compiler version 2.0.60609 for Microsoft (R) Windows (R) 2005 Framework version 2.0.50727 Copyright (C) Microsoft Corporation 2006. All rights reserved. fatal error : Unrecognized option: '/'.
Microsoft (R) XSLT Compiler version 2.0.60609 for Microsoft (R) Windows (R) 2005 Framework version 2.0.50727 Copyright (C) Microsoft Corporation 2006. All rights reserved. fatal error : Unrecognized option: '/'.
-1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/libraries/Common/src/System/Security/Cryptography/DSASecurityTransforms.macOS.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Buffers; using System.Diagnostics; using System.Formats.Asn1; using System.IO; using System.Runtime.InteropServices; using System.Security.Cryptography.Apple; using Internal.Cryptography; namespace System.Security.Cryptography { internal static partial class DSAImplementation { public sealed partial class DSASecurityTransforms : DSA { public override DSAParameters ExportParameters(bool includePrivateParameters) { // Apple requires all private keys to be exported encrypted, but since we're trying to export // as parsed structures we will need to decrypt it for the user. const string ExportPassword = "DotnetExportPassphrase"; SecKeyPair keys = GetKeys(); if (includePrivateParameters && keys.PrivateKey == null) { throw new CryptographicException(SR.Cryptography_OpenInvalidHandle); } byte[] keyBlob = Interop.AppleCrypto.SecKeyExport( includePrivateParameters ? keys.PrivateKey : keys.PublicKey, exportPrivate: includePrivateParameters, password: ExportPassword); try { if (!includePrivateParameters) { DSAKeyFormatHelper.ReadSubjectPublicKeyInfo( keyBlob, out int localRead, out DSAParameters key); Debug.Assert(localRead == keyBlob.Length); return key; } else { DSAKeyFormatHelper.ReadEncryptedPkcs8( keyBlob, ExportPassword, out int localRead, out DSAParameters key); Debug.Assert(localRead == keyBlob.Length); return key; } } finally { CryptographicOperations.ZeroMemory(keyBlob); } } public override void ImportParameters(DSAParameters parameters) { if (parameters.P == null || parameters.Q == null || parameters.G == null || parameters.Y == null) throw new ArgumentException(SR.Cryptography_InvalidDsaParameters_MissingFields); // J is not required and is not even used on CNG blobs. // It should, however, be less than P (J == (P-1) / Q). // This validation check is just to maintain parity with DSACng and DSACryptoServiceProvider, // which also perform this check. 
if (parameters.J != null && parameters.J.Length >= parameters.P.Length) throw new ArgumentException(SR.Cryptography_InvalidDsaParameters_MismatchedPJ); int keySize = parameters.P.Length; bool hasPrivateKey = parameters.X != null; if (parameters.G.Length != keySize || parameters.Y.Length != keySize) throw new ArgumentException(SR.Cryptography_InvalidDsaParameters_MismatchedPGY); if (hasPrivateKey && parameters.X!.Length != parameters.Q.Length) throw new ArgumentException(SR.Cryptography_InvalidDsaParameters_MismatchedQX); if (!(8 * parameters.P.Length).IsLegalSize(LegalKeySizes)) throw new CryptographicException(SR.Cryptography_InvalidKeySize); if (parameters.Q.Length != 20) throw new CryptographicException(SR.Cryptography_InvalidDsaParameters_QRestriction_ShortKey); ThrowIfDisposed(); if (hasPrivateKey) { SafeSecKeyRefHandle privateKey = ImportKey(parameters); DSAParameters publicOnly = parameters; publicOnly.X = null; SafeSecKeyRefHandle publicKey; try { publicKey = ImportKey(publicOnly); } catch { privateKey.Dispose(); throw; } SetKey(SecKeyPair.PublicPrivatePair(publicKey, privateKey)); } else { SafeSecKeyRefHandle publicKey = ImportKey(parameters); SetKey(SecKeyPair.PublicOnly(publicKey)); } } public override void ImportEncryptedPkcs8PrivateKey( ReadOnlySpan<byte> passwordBytes, ReadOnlySpan<byte> source, out int bytesRead) { ThrowIfDisposed(); base.ImportEncryptedPkcs8PrivateKey(passwordBytes, source, out bytesRead); } public override void ImportEncryptedPkcs8PrivateKey( ReadOnlySpan<char> password, ReadOnlySpan<byte> source, out int bytesRead) { ThrowIfDisposed(); base.ImportEncryptedPkcs8PrivateKey(password, source, out bytesRead); } private static SafeSecKeyRefHandle ImportKey(DSAParameters parameters) { AsnWriter keyWriter; bool hasPrivateKey; if (parameters.X != null) { // DSAPrivateKey ::= SEQUENCE( // version INTEGER, // p INTEGER, // q INTEGER, // g INTEGER, // y INTEGER, // x INTEGER, // ) keyWriter = new AsnWriter(AsnEncodingRules.DER); using (keyWriter.PushSequence()) { keyWriter.WriteInteger(0); keyWriter.WriteKeyParameterInteger(parameters.P); keyWriter.WriteKeyParameterInteger(parameters.Q); keyWriter.WriteKeyParameterInteger(parameters.G); keyWriter.WriteKeyParameterInteger(parameters.Y); keyWriter.WriteKeyParameterInteger(parameters.X); } hasPrivateKey = true; } else { keyWriter = DSAKeyFormatHelper.WriteSubjectPublicKeyInfo(parameters); hasPrivateKey = false; } byte[] rented = CryptoPool.Rent(keyWriter.GetEncodedLength()); if (!keyWriter.TryEncode(rented, out int written)) { Debug.Fail("TryEncode failed with a pre-allocated buffer"); throw new InvalidOperationException(); } // Explicitly clear the inner buffer keyWriter.Reset(); try { return Interop.AppleCrypto.ImportEphemeralKey(rented.AsSpan(0, written), hasPrivateKey); } finally { CryptoPool.Return(rented, written); } } public override unsafe void ImportSubjectPublicKeyInfo( ReadOnlySpan<byte> source, out int bytesRead) { ThrowIfDisposed(); fixed (byte* ptr = &MemoryMarshal.GetReference(source)) { using (MemoryManager<byte> manager = new PointerMemoryManager<byte>(ptr, source.Length)) { // Validate the DER value and get the number of bytes. DSAKeyFormatHelper.ReadSubjectPublicKeyInfo( manager.Memory, out int localRead); SafeSecKeyRefHandle publicKey = Interop.AppleCrypto.ImportEphemeralKey(source.Slice(0, localRead), false); SetKey(SecKeyPair.PublicOnly(publicKey)); bytesRead = localRead; } } } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Buffers; using System.Diagnostics; using System.Formats.Asn1; using System.IO; using System.Runtime.InteropServices; using System.Security.Cryptography.Apple; using Internal.Cryptography; namespace System.Security.Cryptography { internal static partial class DSAImplementation { public sealed partial class DSASecurityTransforms : DSA { public override DSAParameters ExportParameters(bool includePrivateParameters) { // Apple requires all private keys to be exported encrypted, but since we're trying to export // as parsed structures we will need to decrypt it for the user. const string ExportPassword = "DotnetExportPassphrase"; SecKeyPair keys = GetKeys(); if (includePrivateParameters && keys.PrivateKey == null) { throw new CryptographicException(SR.Cryptography_OpenInvalidHandle); } byte[] keyBlob = Interop.AppleCrypto.SecKeyExport( includePrivateParameters ? keys.PrivateKey : keys.PublicKey, exportPrivate: includePrivateParameters, password: ExportPassword); try { if (!includePrivateParameters) { DSAKeyFormatHelper.ReadSubjectPublicKeyInfo( keyBlob, out int localRead, out DSAParameters key); Debug.Assert(localRead == keyBlob.Length); return key; } else { DSAKeyFormatHelper.ReadEncryptedPkcs8( keyBlob, ExportPassword, out int localRead, out DSAParameters key); Debug.Assert(localRead == keyBlob.Length); return key; } } finally { CryptographicOperations.ZeroMemory(keyBlob); } } public override void ImportParameters(DSAParameters parameters) { if (parameters.P == null || parameters.Q == null || parameters.G == null || parameters.Y == null) throw new ArgumentException(SR.Cryptography_InvalidDsaParameters_MissingFields); // J is not required and is not even used on CNG blobs. // It should, however, be less than P (J == (P-1) / Q). // This validation check is just to maintain parity with DSACng and DSACryptoServiceProvider, // which also perform this check. 
if (parameters.J != null && parameters.J.Length >= parameters.P.Length) throw new ArgumentException(SR.Cryptography_InvalidDsaParameters_MismatchedPJ); int keySize = parameters.P.Length; bool hasPrivateKey = parameters.X != null; if (parameters.G.Length != keySize || parameters.Y.Length != keySize) throw new ArgumentException(SR.Cryptography_InvalidDsaParameters_MismatchedPGY); if (hasPrivateKey && parameters.X!.Length != parameters.Q.Length) throw new ArgumentException(SR.Cryptography_InvalidDsaParameters_MismatchedQX); if (!(8 * parameters.P.Length).IsLegalSize(LegalKeySizes)) throw new CryptographicException(SR.Cryptography_InvalidKeySize); if (parameters.Q.Length != 20) throw new CryptographicException(SR.Cryptography_InvalidDsaParameters_QRestriction_ShortKey); ThrowIfDisposed(); if (hasPrivateKey) { SafeSecKeyRefHandle privateKey = ImportKey(parameters); DSAParameters publicOnly = parameters; publicOnly.X = null; SafeSecKeyRefHandle publicKey; try { publicKey = ImportKey(publicOnly); } catch { privateKey.Dispose(); throw; } SetKey(SecKeyPair.PublicPrivatePair(publicKey, privateKey)); } else { SafeSecKeyRefHandle publicKey = ImportKey(parameters); SetKey(SecKeyPair.PublicOnly(publicKey)); } } public override void ImportEncryptedPkcs8PrivateKey( ReadOnlySpan<byte> passwordBytes, ReadOnlySpan<byte> source, out int bytesRead) { ThrowIfDisposed(); base.ImportEncryptedPkcs8PrivateKey(passwordBytes, source, out bytesRead); } public override void ImportEncryptedPkcs8PrivateKey( ReadOnlySpan<char> password, ReadOnlySpan<byte> source, out int bytesRead) { ThrowIfDisposed(); base.ImportEncryptedPkcs8PrivateKey(password, source, out bytesRead); } private static SafeSecKeyRefHandle ImportKey(DSAParameters parameters) { AsnWriter keyWriter; bool hasPrivateKey; if (parameters.X != null) { // DSAPrivateKey ::= SEQUENCE( // version INTEGER, // p INTEGER, // q INTEGER, // g INTEGER, // y INTEGER, // x INTEGER, // ) keyWriter = new AsnWriter(AsnEncodingRules.DER); using (keyWriter.PushSequence()) { keyWriter.WriteInteger(0); keyWriter.WriteKeyParameterInteger(parameters.P); keyWriter.WriteKeyParameterInteger(parameters.Q); keyWriter.WriteKeyParameterInteger(parameters.G); keyWriter.WriteKeyParameterInteger(parameters.Y); keyWriter.WriteKeyParameterInteger(parameters.X); } hasPrivateKey = true; } else { keyWriter = DSAKeyFormatHelper.WriteSubjectPublicKeyInfo(parameters); hasPrivateKey = false; } byte[] rented = CryptoPool.Rent(keyWriter.GetEncodedLength()); if (!keyWriter.TryEncode(rented, out int written)) { Debug.Fail("TryEncode failed with a pre-allocated buffer"); throw new InvalidOperationException(); } // Explicitly clear the inner buffer keyWriter.Reset(); try { return Interop.AppleCrypto.ImportEphemeralKey(rented.AsSpan(0, written), hasPrivateKey); } finally { CryptoPool.Return(rented, written); } } public override unsafe void ImportSubjectPublicKeyInfo( ReadOnlySpan<byte> source, out int bytesRead) { ThrowIfDisposed(); fixed (byte* ptr = &MemoryMarshal.GetReference(source)) { using (MemoryManager<byte> manager = new PointerMemoryManager<byte>(ptr, source.Length)) { // Validate the DER value and get the number of bytes. DSAKeyFormatHelper.ReadSubjectPublicKeyInfo( manager.Memory, out int localRead); SafeSecKeyRefHandle publicKey = Interop.AppleCrypto.ImportEphemeralKey(source.Slice(0, localRead), false); SetKey(SecKeyPair.PublicOnly(publicKey)); bytesRead = localRead; } } } } } }
-1
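The ImportParameters path in the DSASecurityTransforms record above performs purely structural checks before any key material reaches Security.framework: J, when present, must be shorter than P (J == (P-1)/Q); G and Y must match P's length; X, when present, must match Q's length; and Q must be exactly 20 bytes. A compact sketch of those length checks over raw big-endian byte arrays follows; the Validate name and the exception messages are illustrative, not the library's, and the sketch omits the overall key-size legality check.

using System;

static class DsaParameterSketch
{
    // Structural length checks mirroring the validation in ImportParameters;
    // p, q, g, y are required, x (the private value) and j are optional.
    static void Validate(byte[] p, byte[] q, byte[] g, byte[] y, byte[] x, byte[] j)
    {
        if (p == null || q == null || g == null || y == null)
            throw new ArgumentException("P, Q, G and Y are required.");

        // J == (P - 1) / Q, so it must be strictly shorter than P when supplied.
        if (j != null && j.Length >= p.Length)
            throw new ArgumentException("J must be shorter than P.");

        if (g.Length != p.Length || y.Length != p.Length)
            throw new ArgumentException("G and Y must have the same length as P.");

        if (x != null && x.Length != q.Length)
            throw new ArgumentException("X must have the same length as Q.");

        // Only FIPS 186-2 sized keys are accepted here: Q is 160 bits.
        if (q.Length != 20)
            throw new ArgumentException("Q must be exactly 20 bytes.");
    }

    static void Main()
    {
        Validate(p: new byte[128], q: new byte[20], g: new byte[128],
                 y: new byte[128], x: new byte[20], j: new byte[64]);
        Console.WriteLine("lengths are structurally valid");
    }
}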
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/MultiplyDoublingSaturateHigh.Vector64.Int32.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void MultiplyDoublingSaturateHigh_Vector64_Int32() { var test = new SimpleBinaryOpTest__MultiplyDoublingSaturateHigh_Vector64_Int32(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleBinaryOpTest__MultiplyDoublingSaturateHigh_Vector64_Int32 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Int32[] inArray1, Int32[] inArray2, Int32[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int32>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int32>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < 
sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int32, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int32, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<Int32> _fld1; public Vector64<Int32> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref testStruct._fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref testStruct._fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); return testStruct; } public void RunStructFldScenario(SimpleBinaryOpTest__MultiplyDoublingSaturateHigh_Vector64_Int32 testClass) { var result = AdvSimd.MultiplyDoublingSaturateHigh(_fld1, _fld2); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleBinaryOpTest__MultiplyDoublingSaturateHigh_Vector64_Int32 testClass) { fixed (Vector64<Int32>* pFld1 = &_fld1) fixed (Vector64<Int32>* pFld2 = &_fld2) { var result = AdvSimd.MultiplyDoublingSaturateHigh( AdvSimd.LoadVector64((Int32*)(pFld1)), AdvSimd.LoadVector64((Int32*)(pFld2)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 8; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32); private static Int32[] _data1 = new Int32[Op1ElementCount]; private static Int32[] _data2 = new Int32[Op2ElementCount]; private static Vector64<Int32> _clsVar1; private static Vector64<Int32> _clsVar2; private Vector64<Int32> _fld1; private Vector64<Int32> _fld2; private DataTable _dataTable; static SimpleBinaryOpTest__MultiplyDoublingSaturateHigh_Vector64_Int32() { for (var i = 0; i < Op1ElementCount; i++) { 
_data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _clsVar1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _clsVar2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); } public SimpleBinaryOpTest__MultiplyDoublingSaturateHigh_Vector64_Int32() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } _dataTable = new DataTable(_data1, _data2, new Int32[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.MultiplyDoublingSaturateHigh( Unsafe.Read<Vector64<Int32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<Int32>>(_dataTable.inArray2Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.MultiplyDoublingSaturateHigh( AdvSimd.LoadVector64((Int32*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector64((Int32*)(_dataTable.inArray2Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.MultiplyDoublingSaturateHigh), new Type[] { typeof(Vector64<Int32>), typeof(Vector64<Int32>) }) .Invoke(null, new object[] { Unsafe.Read<Vector64<Int32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<Int32>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.MultiplyDoublingSaturateHigh), new Type[] { typeof(Vector64<Int32>), typeof(Vector64<Int32>) }) .Invoke(null, new object[] { AdvSimd.LoadVector64((Int32*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector64((Int32*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.MultiplyDoublingSaturateHigh( 
_clsVar1, _clsVar2 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector64<Int32>* pClsVar1 = &_clsVar1) fixed (Vector64<Int32>* pClsVar2 = &_clsVar2) { var result = AdvSimd.MultiplyDoublingSaturateHigh( AdvSimd.LoadVector64((Int32*)(pClsVar1)), AdvSimd.LoadVector64((Int32*)(pClsVar2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector64<Int32>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector64<Int32>>(_dataTable.inArray2Ptr); var result = AdvSimd.MultiplyDoublingSaturateHigh(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector64((Int32*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector64((Int32*)(_dataTable.inArray2Ptr)); var result = AdvSimd.MultiplyDoublingSaturateHigh(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleBinaryOpTest__MultiplyDoublingSaturateHigh_Vector64_Int32(); var result = AdvSimd.MultiplyDoublingSaturateHigh(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleBinaryOpTest__MultiplyDoublingSaturateHigh_Vector64_Int32(); fixed (Vector64<Int32>* pFld1 = &test._fld1) fixed (Vector64<Int32>* pFld2 = &test._fld2) { var result = AdvSimd.MultiplyDoublingSaturateHigh( AdvSimd.LoadVector64((Int32*)(pFld1)), AdvSimd.LoadVector64((Int32*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.MultiplyDoublingSaturateHigh(_fld1, _fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector64<Int32>* pFld1 = &_fld1) fixed (Vector64<Int32>* pFld2 = &_fld2) { var result = AdvSimd.MultiplyDoublingSaturateHigh( AdvSimd.LoadVector64((Int32*)(pFld1)), AdvSimd.LoadVector64((Int32*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.MultiplyDoublingSaturateHigh(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = 
AdvSimd.MultiplyDoublingSaturateHigh( AdvSimd.LoadVector64((Int32*)(&test._fld1)), AdvSimd.LoadVector64((Int32*)(&test._fld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector64<Int32> op1, Vector64<Int32> op2, void* result, [CallerMemberName] string method = "") { Int32[] inArray1 = new Int32[Op1ElementCount]; Int32[] inArray2 = new Int32[Op2ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { Int32[] inArray1 = new Int32[Op1ElementCount]; Int32[] inArray2 = new Int32[Op2ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(Int32[] left, Int32[] right, Int32[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.MultiplyDoublingSaturateHigh(left[i], right[i]) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.MultiplyDoublingSaturateHigh)}<Int32>(Vector64<Int32>, Vector64<Int32>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void MultiplyDoublingSaturateHigh_Vector64_Int32() { var test = new SimpleBinaryOpTest__MultiplyDoublingSaturateHigh_Vector64_Int32(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleBinaryOpTest__MultiplyDoublingSaturateHigh_Vector64_Int32 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Int32[] inArray1, Int32[] inArray2, Int32[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int32>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int32>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < 
sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int32, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int32, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<Int32> _fld1; public Vector64<Int32> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref testStruct._fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref testStruct._fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); return testStruct; } public void RunStructFldScenario(SimpleBinaryOpTest__MultiplyDoublingSaturateHigh_Vector64_Int32 testClass) { var result = AdvSimd.MultiplyDoublingSaturateHigh(_fld1, _fld2); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleBinaryOpTest__MultiplyDoublingSaturateHigh_Vector64_Int32 testClass) { fixed (Vector64<Int32>* pFld1 = &_fld1) fixed (Vector64<Int32>* pFld2 = &_fld2) { var result = AdvSimd.MultiplyDoublingSaturateHigh( AdvSimd.LoadVector64((Int32*)(pFld1)), AdvSimd.LoadVector64((Int32*)(pFld2)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 8; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32); private static Int32[] _data1 = new Int32[Op1ElementCount]; private static Int32[] _data2 = new Int32[Op2ElementCount]; private static Vector64<Int32> _clsVar1; private static Vector64<Int32> _clsVar2; private Vector64<Int32> _fld1; private Vector64<Int32> _fld2; private DataTable _dataTable; static SimpleBinaryOpTest__MultiplyDoublingSaturateHigh_Vector64_Int32() { for (var i = 0; i < Op1ElementCount; i++) { 
_data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _clsVar1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _clsVar2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); } public SimpleBinaryOpTest__MultiplyDoublingSaturateHigh_Vector64_Int32() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } _dataTable = new DataTable(_data1, _data2, new Int32[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.MultiplyDoublingSaturateHigh( Unsafe.Read<Vector64<Int32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<Int32>>(_dataTable.inArray2Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.MultiplyDoublingSaturateHigh( AdvSimd.LoadVector64((Int32*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector64((Int32*)(_dataTable.inArray2Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.MultiplyDoublingSaturateHigh), new Type[] { typeof(Vector64<Int32>), typeof(Vector64<Int32>) }) .Invoke(null, new object[] { Unsafe.Read<Vector64<Int32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<Int32>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.MultiplyDoublingSaturateHigh), new Type[] { typeof(Vector64<Int32>), typeof(Vector64<Int32>) }) .Invoke(null, new object[] { AdvSimd.LoadVector64((Int32*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector64((Int32*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.MultiplyDoublingSaturateHigh( 
_clsVar1, _clsVar2 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector64<Int32>* pClsVar1 = &_clsVar1) fixed (Vector64<Int32>* pClsVar2 = &_clsVar2) { var result = AdvSimd.MultiplyDoublingSaturateHigh( AdvSimd.LoadVector64((Int32*)(pClsVar1)), AdvSimd.LoadVector64((Int32*)(pClsVar2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector64<Int32>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector64<Int32>>(_dataTable.inArray2Ptr); var result = AdvSimd.MultiplyDoublingSaturateHigh(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector64((Int32*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector64((Int32*)(_dataTable.inArray2Ptr)); var result = AdvSimd.MultiplyDoublingSaturateHigh(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleBinaryOpTest__MultiplyDoublingSaturateHigh_Vector64_Int32(); var result = AdvSimd.MultiplyDoublingSaturateHigh(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleBinaryOpTest__MultiplyDoublingSaturateHigh_Vector64_Int32(); fixed (Vector64<Int32>* pFld1 = &test._fld1) fixed (Vector64<Int32>* pFld2 = &test._fld2) { var result = AdvSimd.MultiplyDoublingSaturateHigh( AdvSimd.LoadVector64((Int32*)(pFld1)), AdvSimd.LoadVector64((Int32*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.MultiplyDoublingSaturateHigh(_fld1, _fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector64<Int32>* pFld1 = &_fld1) fixed (Vector64<Int32>* pFld2 = &_fld2) { var result = AdvSimd.MultiplyDoublingSaturateHigh( AdvSimd.LoadVector64((Int32*)(pFld1)), AdvSimd.LoadVector64((Int32*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.MultiplyDoublingSaturateHigh(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = 
AdvSimd.MultiplyDoublingSaturateHigh( AdvSimd.LoadVector64((Int32*)(&test._fld1)), AdvSimd.LoadVector64((Int32*)(&test._fld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector64<Int32> op1, Vector64<Int32> op2, void* result, [CallerMemberName] string method = "") { Int32[] inArray1 = new Int32[Op1ElementCount]; Int32[] inArray2 = new Int32[Op2ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { Int32[] inArray1 = new Int32[Op1ElementCount]; Int32[] inArray2 = new Int32[Op2ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(Int32[] left, Int32[] right, Int32[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.MultiplyDoublingSaturateHigh(left[i], right[i]) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.MultiplyDoublingSaturateHigh)}<Int32>(Vector64<Int32>, Vector64<Int32>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
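The loop-cloning description above is easier to picture with a concrete loop shape. The following is a hypothetical C# illustration (not code from the PR) of a `for (i = expression; ...)` loop whose induction-variable initialization is neither a constant nor a simple local — the case the description says now gets cloned, with the cloner accepting the pre-existing initialization and, where needed, checking the loop condition against zero:

static int SumFrom(int[] a, int start, int offset)
{
    int sum = 0;
    // The init `start + offset` is an arbitrary expression rather than a constant or a local.
    for (int i = start + offset; i < a.Length; i++)
    {
        sum += a[i];   // the cloned fast path can elide the bounds check for a[i]
    }
    return sum;
}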
./src/coreclr/nativeaot/System.Private.Reflection.Core/src/System/Reflection/Runtime/TypeInfos/NativeFormat/NativeFormatRuntimeGenericParameterTypeInfoForTypes.UnificationKey.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Reflection; using System.Diagnostics; using System.Collections.Generic; using System.Reflection.Runtime.General; using System.Reflection.Runtime.TypeInfos; using Internal.Reflection.Tracing; using Internal.Metadata.NativeFormat; namespace System.Reflection.Runtime.TypeInfos.NativeFormat { internal sealed partial class NativeFormatRuntimeGenericParameterTypeInfoForTypes : NativeFormatRuntimeGenericParameterTypeInfo { // // Key for unification. // internal struct UnificationKey : IEquatable<UnificationKey> { public UnificationKey(MetadataReader reader, TypeDefinitionHandle typeDefinitionHandle, GenericParameterHandle genericParameterHandle) { Reader = reader; TypeDefinitionHandle = typeDefinitionHandle; GenericParameterHandle = genericParameterHandle; } public MetadataReader Reader { get; } public TypeDefinitionHandle TypeDefinitionHandle { get; } public GenericParameterHandle GenericParameterHandle { get; } public override bool Equals(object obj) { if (!(obj is UnificationKey other)) return false; return Equals(other); } public bool Equals(UnificationKey other) { if (!TypeDefinitionHandle.Equals(other.TypeDefinitionHandle)) return false; if (!(Reader == other.Reader)) return false; if (!(GenericParameterHandle.Equals(other.GenericParameterHandle))) return false; return true; } public override int GetHashCode() { return TypeDefinitionHandle.GetHashCode(); } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Reflection; using System.Diagnostics; using System.Collections.Generic; using System.Reflection.Runtime.General; using System.Reflection.Runtime.TypeInfos; using Internal.Reflection.Tracing; using Internal.Metadata.NativeFormat; namespace System.Reflection.Runtime.TypeInfos.NativeFormat { internal sealed partial class NativeFormatRuntimeGenericParameterTypeInfoForTypes : NativeFormatRuntimeGenericParameterTypeInfo { // // Key for unification. // internal struct UnificationKey : IEquatable<UnificationKey> { public UnificationKey(MetadataReader reader, TypeDefinitionHandle typeDefinitionHandle, GenericParameterHandle genericParameterHandle) { Reader = reader; TypeDefinitionHandle = typeDefinitionHandle; GenericParameterHandle = genericParameterHandle; } public MetadataReader Reader { get; } public TypeDefinitionHandle TypeDefinitionHandle { get; } public GenericParameterHandle GenericParameterHandle { get; } public override bool Equals(object obj) { if (!(obj is UnificationKey other)) return false; return Equals(other); } public bool Equals(UnificationKey other) { if (!TypeDefinitionHandle.Equals(other.TypeDefinitionHandle)) return false; if (!(Reader == other.Reader)) return false; if (!(GenericParameterHandle.Equals(other.GenericParameterHandle))) return false; return true; } public override int GetHashCode() { return TypeDefinitionHandle.GetHashCode(); } } } }
-1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/tests/JIT/IL_Conformance/Old/Conformance_Base/ret_r8.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern legacy library mscorlib {} .class public _ret { .method public static float64 ret_test(float64) { .maxstack 1 ldarg 0 ret } .method public void _ret() { .maxstack 0 ret } .method public static int32 main(class [mscorlib]System.String[]) { .entrypoint .maxstack 12 ldc.r8 float64(0x7FF8000000000000) call float64 _ret::ret_test(float64) ldc.r8 float64(0x7FF8000000000000) cgt.un brfalse FAIL ldc.r8 float64(0x7FF0000000000000) call float64 _ret::ret_test(float64) ldc.r8 float64(0x7FF0000000000000) ceq brfalse FAIL ldc.r8 float64(0x7FEFFFFFFFFFFFFF) call float64 _ret::ret_test(float64) ldc.r8 float64(0x7FEFFFFFFFFFFFFF) ceq brfalse FAIL ldc.r8 float64(0x3FF0000000000000) call float64 _ret::ret_test(float64) ldc.r8 float64(0x3FF0000000000000) ceq brfalse FAIL ldc.r8 float64(0x0000000000000000) call float64 _ret::ret_test(float64) ldc.r8 float64(0x0000000000000000) ceq brfalse FAIL ldc.r8 float64(0x8000000000000000) call float64 _ret::ret_test(float64) ldc.r8 float64(0x8000000000000000) ceq brfalse FAIL ldc.r8 float64(0xBFF0000000000000) call float64 _ret::ret_test(float64) ldc.r8 float64(0xBFF0000000000000) ceq brfalse FAIL ldc.r8 float64(0xFFEFFFFFFFFFFFFF) call float64 _ret::ret_test(float64) ldc.r8 float64(0xFFEFFFFFFFFFFFFF) ceq brfalse FAIL ldc.r8 float64(0xFFF0000000000000) call float64 _ret::ret_test(float64) ldc.r8 float64(0xFFF0000000000000) ceq brfalse FAIL PASS: ldc.i4 100 br END FAIL: ldc.i4 0x00000000 END: ret } } .assembly ret_r8{}
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern legacy library mscorlib {} .class public _ret { .method public static float64 ret_test(float64) { .maxstack 1 ldarg 0 ret } .method public void _ret() { .maxstack 0 ret } .method public static int32 main(class [mscorlib]System.String[]) { .entrypoint .maxstack 12 ldc.r8 float64(0x7FF8000000000000) call float64 _ret::ret_test(float64) ldc.r8 float64(0x7FF8000000000000) cgt.un brfalse FAIL ldc.r8 float64(0x7FF0000000000000) call float64 _ret::ret_test(float64) ldc.r8 float64(0x7FF0000000000000) ceq brfalse FAIL ldc.r8 float64(0x7FEFFFFFFFFFFFFF) call float64 _ret::ret_test(float64) ldc.r8 float64(0x7FEFFFFFFFFFFFFF) ceq brfalse FAIL ldc.r8 float64(0x3FF0000000000000) call float64 _ret::ret_test(float64) ldc.r8 float64(0x3FF0000000000000) ceq brfalse FAIL ldc.r8 float64(0x0000000000000000) call float64 _ret::ret_test(float64) ldc.r8 float64(0x0000000000000000) ceq brfalse FAIL ldc.r8 float64(0x8000000000000000) call float64 _ret::ret_test(float64) ldc.r8 float64(0x8000000000000000) ceq brfalse FAIL ldc.r8 float64(0xBFF0000000000000) call float64 _ret::ret_test(float64) ldc.r8 float64(0xBFF0000000000000) ceq brfalse FAIL ldc.r8 float64(0xFFEFFFFFFFFFFFFF) call float64 _ret::ret_test(float64) ldc.r8 float64(0xFFEFFFFFFFFFFFFF) ceq brfalse FAIL ldc.r8 float64(0xFFF0000000000000) call float64 _ret::ret_test(float64) ldc.r8 float64(0xFFF0000000000000) ceq brfalse FAIL PASS: ldc.i4 100 br END FAIL: ldc.i4 0x00000000 END: ret } } .assembly ret_r8{}
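The first check in the IL test above passes a NaN through ret_test and deliberately uses cgt.un instead of ceq: an ordered equality involving NaN is always false, whereas the unordered compare yields true when either operand is NaN. A small illustrative C# fragment of the same point (the trivial ret_test here mirrors the IL method, which simply returns its argument):

using System;

static class RetR8NanIllustration
{
    static double ret_test(double d) => d;     // mirrors the IL ret_test: returns its argument

    public static void Main()
    {
        double r = ret_test(double.NaN);
        Console.WriteLine(r == double.NaN);    // False: ordered equality never holds for NaN
        Console.WriteLine(double.IsNaN(r));    // True: the property the NaN case is probing
    }
}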
-1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/tests/JIT/CodeGenBringUpTests/NestedCall_r.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>False</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="NestedCall.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>False</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="NestedCall.cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/tests/JIT/HardwareIntrinsics/X86/AvxVnni/MultiplyWideningAndAdd.Int16.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.X86; using System.Text.RegularExpressions; namespace JIT.HardwareIntrinsics.X86 { public static partial class Program { private static void MultiplyWideningAndAddInt16() { var test = new SimpleTernaryOpTest__MultiplyWideningAndAddInt16(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); // Validates basic functionality works, using LoadAligned test.RunBasicScenario_LoadAligned(); } else { Console.WriteLine("Avx Is Not Supported"); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); //TODO: this one does not work. Fix it. if (Avx.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); // Validates calling via reflection works, using LoadAligned test.RunReflectionScenario_LoadAligned(); } // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); // Validates passing a local works, using LoadAligned test.RunLclVarScenario_LoadAligned(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); } else { Console.WriteLine("Test Is Not Supported"); // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleTernaryOpTest__MultiplyWideningAndAddInt16 { private struct DataTable { private byte[] inArray0; private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle0; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Int32[] inArray0, Int16[] inArray1, Int16[] inArray2, Int32[] outArray, int alignment) { int sizeOfinArray0 = inArray0.Length * Unsafe.SizeOf<Int32>(); int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int16>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int16>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>(); if((alignment != 32 && alignment != 16) || (alignment *2) < sizeOfinArray0 || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray0 = new byte[alignment * 2]; this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle0 = GCHandle.Alloc(this.inArray0, GCHandleType.Pinned); this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = 
GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray0Ptr), ref Unsafe.As<Int32, byte>(ref inArray0[0]), (uint)sizeOfinArray0); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int16, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int16, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray0Ptr => Align((byte*)(inHandle0.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle0.Free(); inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlighment) { return (void*)(((ulong)buffer + expectedAlighment -1) & ~(expectedAlighment - 1)); } } private struct TestStruct { public Vector256<Int32> _fld0; public Vector256<Int16> _fld1; public Vector256<Int16> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op0ElementCount; i++) { _data0[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int32>, byte>(ref testStruct._fld0), ref Unsafe.As<Int32, byte>(ref _data0[0]), (uint)Unsafe.SizeOf<Vector256<Int32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int16>, byte>(ref testStruct._fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Int16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = (sbyte)TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int16>, byte>(ref testStruct._fld2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<Int16>>()); return testStruct; } public void RunStructFldScenario(SimpleTernaryOpTest__MultiplyWideningAndAddInt16 testClass) { var result = AvxVnni.MultiplyWideningAndAdd(_fld0, _fld1, _fld2); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld0, _fld1, _fld2, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = 32; private static readonly int Op0ElementCount = Unsafe.SizeOf<Vector256<Int32>>() / sizeof(Int32); private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector256<Int16>>() / sizeof(Int16); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector256<Int16>>() / sizeof(Int16); private static readonly int RetElementCount = Unsafe.SizeOf<Vector256<Int32>>() / sizeof(Int32); private static Int32[] _data0 = new Int32[Op0ElementCount]; private static Int16[] _data1 = new Int16[Op1ElementCount]; private static Int16[] _data2 = new Int16[Op2ElementCount]; private static Vector256<Int32> _clsVar0; private static Vector256<Int16> _clsVar1; private static Vector256<Int16> _clsVar2; private Vector256<Int32> _fld0; private Vector256<Int16> _fld1; private Vector256<Int16> _fld2; private DataTable _dataTable; static SimpleTernaryOpTest__MultiplyWideningAndAddInt16() { for (var i = 0; i < Op0ElementCount; i++) { _data0[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int32>, byte>(ref _clsVar0), 
ref Unsafe.As<Int32, byte>(ref _data0[0]), (uint)Unsafe.SizeOf<Vector256<Int32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int16>, byte>(ref _clsVar1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Int16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = (sbyte)TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int16>, byte>(ref _clsVar2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<Int16>>()); } public SimpleTernaryOpTest__MultiplyWideningAndAddInt16() { Succeeded = true; for (var i = 0; i < Op0ElementCount; i++) { _data0[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int32>, byte>(ref _fld0), ref Unsafe.As<Int32, byte>(ref _data0[0]), (uint)Unsafe.SizeOf<Vector256<Int32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int16>, byte>(ref _fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Int16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = (sbyte)TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int16>, byte>(ref _fld2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<Int16>>()); for (var i = 0; i < Op0ElementCount; i++) { _data0[i] = TestLibrary.Generator.GetInt32(); } for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); } _dataTable = new DataTable(_data0, _data1, _data2, new Int32[RetElementCount], LargestVectorSize); } public bool IsSupported => AvxVnni.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AvxVnni.MultiplyWideningAndAdd( Unsafe.Read<Vector256<Int32>>(_dataTable.inArray0Ptr), Unsafe.Read<Vector256<Int16>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector256<Int16>>(_dataTable.inArray2Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray0Ptr, _dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AvxVnni.MultiplyWideningAndAdd( Avx.LoadVector256((Int32*)(_dataTable.inArray0Ptr)), Avx.LoadVector256((Int16*)(_dataTable.inArray1Ptr)), Avx.LoadVector256((Int16*)(_dataTable.inArray2Ptr))); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray0Ptr, _dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_LoadAligned)); var result = AvxVnni.MultiplyWideningAndAdd( Avx.LoadAlignedVector256((Int32*)(_dataTable.inArray0Ptr)), Avx.LoadAlignedVector256((Int16*)(_dataTable.inArray1Ptr)), Avx.LoadAlignedVector256((Int16*)(_dataTable.inArray2Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray0Ptr, _dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = 
typeof(AvxVnni).GetMethod(nameof(AvxVnni.MultiplyWideningAndAdd), new Type[] { typeof(Vector256<Int32>), typeof(Vector256<Int16>), typeof(Vector256<Int16>) }) .Invoke(null, new object[] { Unsafe.Read<Vector256<Int32>>(_dataTable.inArray0Ptr), Unsafe.Read<Vector256<Int16>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector256<Int16>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Int32>)(result)); ValidateResult(_dataTable.inArray0Ptr, _dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AvxVnni).GetMethod(nameof(AvxVnni.MultiplyWideningAndAdd), new Type[] { typeof(Vector256<Int32>), typeof(Vector256<Int16>), typeof(Vector256<Int16>) }) .Invoke(null, new object[] { Avx.LoadVector256((Int32*)(_dataTable.inArray0Ptr)), Avx.LoadVector256((Int16*)(_dataTable.inArray1Ptr)), Avx.LoadVector256((Int16*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Int32>)(result)); ValidateResult(_dataTable.inArray0Ptr, _dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_LoadAligned)); var result = typeof(AvxVnni).GetMethod(nameof(AvxVnni.MultiplyWideningAndAdd), new Type[] { typeof(Vector256<Int32>), typeof(Vector256<Int16>), typeof(Vector256<Int16>) }) .Invoke(null, new object[] { Avx.LoadAlignedVector256((Int32*)(_dataTable.inArray0Ptr)), Avx.LoadAlignedVector256((Int16*)(_dataTable.inArray1Ptr)), Avx.LoadAlignedVector256((Int16*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Int32>)(result)); ValidateResult(_dataTable.inArray0Ptr, _dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AvxVnni.MultiplyWideningAndAdd( _clsVar0, _clsVar1, _clsVar2 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar0, _clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var first = Unsafe.Read<Vector256<Int32>>(_dataTable.inArray0Ptr); var second = Unsafe.Read<Vector256<Int16>>(_dataTable.inArray1Ptr); var third = Unsafe.Read<Vector256<Int16>>(_dataTable.inArray2Ptr); var result = AvxVnni.MultiplyWideningAndAdd(first, second, third); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(first, second, third, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var first= Avx.LoadVector256((Int32*)(_dataTable.inArray0Ptr)); var second = Avx.LoadVector256((Int16*)(_dataTable.inArray1Ptr)); var third = Avx.LoadVector256((Int16*)(_dataTable.inArray2Ptr)); var result = AvxVnni.MultiplyWideningAndAdd(first, second, third); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(first, second, third, _dataTable.outArrayPtr); } public void RunLclVarScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_LoadAligned)); var first = Avx.LoadAlignedVector256((Int32*)(_dataTable.inArray0Ptr)); var second = Avx.LoadAlignedVector256((Int16*)(_dataTable.inArray1Ptr)); var third = Avx.LoadAlignedVector256((Int16*)(_dataTable.inArray2Ptr)); var result = 
AvxVnni.MultiplyWideningAndAdd(first, second, third); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(first, second, third, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleTernaryOpTest__MultiplyWideningAndAddInt16(); var result = AvxVnni.MultiplyWideningAndAdd(test._fld0, test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld0, test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AvxVnni.MultiplyWideningAndAdd(_fld0, _fld1, _fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld0, _fld1, _fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AvxVnni.MultiplyWideningAndAdd(test._fld0, test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld0, test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector256<Int32> addend, Vector256<Int16> left, Vector256<Int16> right, void* result, [CallerMemberName] string method = "") { Int32[] inArray0 = new Int32[Op0ElementCount]; Int16[] inArray1 = new Int16[Op1ElementCount]; Int16[] inArray2 = new Int16[Op2ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray0[0]), addend); Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), left); Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray2[0]), right); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Int32>>()); ValidateResult(inArray0, inArray1, inArray2, outArray, method); } private void ValidateResult(void* addend, void* left, void* right, void* result, [CallerMemberName] string method = "") { Int32[] inArray0 = new Int32[Op0ElementCount]; Int16[] inArray1 = new Int16[Op1ElementCount]; Int16[] inArray2 = new Int16[Op2ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray0[0]), ref Unsafe.AsRef<byte>(addend), (uint)Unsafe.SizeOf<Vector256<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(left), (uint)Unsafe.SizeOf<Vector256<Int16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(right), (uint)Unsafe.SizeOf<Vector256<Int16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Int32>>()); ValidateResult(inArray0, inArray1, inArray2, outArray, method); } private void ValidateResult(Int32[] addend, Int16[] left, Int16[] right, Int32[] result, [CallerMemberName] string method = "") { bool succeeded = true; Int32[] outArray = new 
Int32[RetElementCount]; for (var i = 0; i < RetElementCount; i++) { outArray[i] = Math.Clamp((addend[i] + (right[i * 2 + 1] * left[i * 2 + 1] + right[i * 2] * left[i * 2])), int.MinValue, int.MaxValue); } for (var i = 0; i < RetElementCount; i++) { if (result[i] != outArray[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AvxVnni)}.{nameof(AvxVnni.MultiplyWideningAndAdd)}<Int32>(Vector256<Int32>, Vector256<Int32>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" addend: ({string.Join(", ", addend)})"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation($" valid: ({string.Join(", ", outArray)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.X86; using System.Text.RegularExpressions; namespace JIT.HardwareIntrinsics.X86 { public static partial class Program { private static void MultiplyWideningAndAddInt16() { var test = new SimpleTernaryOpTest__MultiplyWideningAndAddInt16(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); // Validates basic functionality works, using LoadAligned test.RunBasicScenario_LoadAligned(); } else { Console.WriteLine("Avx Is Not Supported"); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); //TODO: this one does not work. Fix it. if (Avx.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); // Validates calling via reflection works, using LoadAligned test.RunReflectionScenario_LoadAligned(); } // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); // Validates passing a local works, using LoadAligned test.RunLclVarScenario_LoadAligned(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); } else { Console.WriteLine("Test Is Not Supported"); // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleTernaryOpTest__MultiplyWideningAndAddInt16 { private struct DataTable { private byte[] inArray0; private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle0; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Int32[] inArray0, Int16[] inArray1, Int16[] inArray2, Int32[] outArray, int alignment) { int sizeOfinArray0 = inArray0.Length * Unsafe.SizeOf<Int32>(); int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int16>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int16>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>(); if((alignment != 32 && alignment != 16) || (alignment *2) < sizeOfinArray0 || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray0 = new byte[alignment * 2]; this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle0 = GCHandle.Alloc(this.inArray0, GCHandleType.Pinned); this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = 
GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray0Ptr), ref Unsafe.As<Int32, byte>(ref inArray0[0]), (uint)sizeOfinArray0); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int16, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int16, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray0Ptr => Align((byte*)(inHandle0.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle0.Free(); inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlighment) { return (void*)(((ulong)buffer + expectedAlighment -1) & ~(expectedAlighment - 1)); } } private struct TestStruct { public Vector256<Int32> _fld0; public Vector256<Int16> _fld1; public Vector256<Int16> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op0ElementCount; i++) { _data0[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int32>, byte>(ref testStruct._fld0), ref Unsafe.As<Int32, byte>(ref _data0[0]), (uint)Unsafe.SizeOf<Vector256<Int32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int16>, byte>(ref testStruct._fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Int16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = (sbyte)TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int16>, byte>(ref testStruct._fld2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<Int16>>()); return testStruct; } public void RunStructFldScenario(SimpleTernaryOpTest__MultiplyWideningAndAddInt16 testClass) { var result = AvxVnni.MultiplyWideningAndAdd(_fld0, _fld1, _fld2); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld0, _fld1, _fld2, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = 32; private static readonly int Op0ElementCount = Unsafe.SizeOf<Vector256<Int32>>() / sizeof(Int32); private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector256<Int16>>() / sizeof(Int16); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector256<Int16>>() / sizeof(Int16); private static readonly int RetElementCount = Unsafe.SizeOf<Vector256<Int32>>() / sizeof(Int32); private static Int32[] _data0 = new Int32[Op0ElementCount]; private static Int16[] _data1 = new Int16[Op1ElementCount]; private static Int16[] _data2 = new Int16[Op2ElementCount]; private static Vector256<Int32> _clsVar0; private static Vector256<Int16> _clsVar1; private static Vector256<Int16> _clsVar2; private Vector256<Int32> _fld0; private Vector256<Int16> _fld1; private Vector256<Int16> _fld2; private DataTable _dataTable; static SimpleTernaryOpTest__MultiplyWideningAndAddInt16() { for (var i = 0; i < Op0ElementCount; i++) { _data0[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int32>, byte>(ref _clsVar0), 
ref Unsafe.As<Int32, byte>(ref _data0[0]), (uint)Unsafe.SizeOf<Vector256<Int32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int16>, byte>(ref _clsVar1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Int16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = (sbyte)TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int16>, byte>(ref _clsVar2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<Int16>>()); } public SimpleTernaryOpTest__MultiplyWideningAndAddInt16() { Succeeded = true; for (var i = 0; i < Op0ElementCount; i++) { _data0[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int32>, byte>(ref _fld0), ref Unsafe.As<Int32, byte>(ref _data0[0]), (uint)Unsafe.SizeOf<Vector256<Int32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int16>, byte>(ref _fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Int16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = (sbyte)TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int16>, byte>(ref _fld2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<Int16>>()); for (var i = 0; i < Op0ElementCount; i++) { _data0[i] = TestLibrary.Generator.GetInt32(); } for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); } _dataTable = new DataTable(_data0, _data1, _data2, new Int32[RetElementCount], LargestVectorSize); } public bool IsSupported => AvxVnni.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AvxVnni.MultiplyWideningAndAdd( Unsafe.Read<Vector256<Int32>>(_dataTable.inArray0Ptr), Unsafe.Read<Vector256<Int16>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector256<Int16>>(_dataTable.inArray2Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray0Ptr, _dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AvxVnni.MultiplyWideningAndAdd( Avx.LoadVector256((Int32*)(_dataTable.inArray0Ptr)), Avx.LoadVector256((Int16*)(_dataTable.inArray1Ptr)), Avx.LoadVector256((Int16*)(_dataTable.inArray2Ptr))); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray0Ptr, _dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_LoadAligned)); var result = AvxVnni.MultiplyWideningAndAdd( Avx.LoadAlignedVector256((Int32*)(_dataTable.inArray0Ptr)), Avx.LoadAlignedVector256((Int16*)(_dataTable.inArray1Ptr)), Avx.LoadAlignedVector256((Int16*)(_dataTable.inArray2Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray0Ptr, _dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = 
typeof(AvxVnni).GetMethod(nameof(AvxVnni.MultiplyWideningAndAdd), new Type[] { typeof(Vector256<Int32>), typeof(Vector256<Int16>), typeof(Vector256<Int16>) }) .Invoke(null, new object[] { Unsafe.Read<Vector256<Int32>>(_dataTable.inArray0Ptr), Unsafe.Read<Vector256<Int16>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector256<Int16>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Int32>)(result)); ValidateResult(_dataTable.inArray0Ptr, _dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AvxVnni).GetMethod(nameof(AvxVnni.MultiplyWideningAndAdd), new Type[] { typeof(Vector256<Int32>), typeof(Vector256<Int16>), typeof(Vector256<Int16>) }) .Invoke(null, new object[] { Avx.LoadVector256((Int32*)(_dataTable.inArray0Ptr)), Avx.LoadVector256((Int16*)(_dataTable.inArray1Ptr)), Avx.LoadVector256((Int16*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Int32>)(result)); ValidateResult(_dataTable.inArray0Ptr, _dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_LoadAligned)); var result = typeof(AvxVnni).GetMethod(nameof(AvxVnni.MultiplyWideningAndAdd), new Type[] { typeof(Vector256<Int32>), typeof(Vector256<Int16>), typeof(Vector256<Int16>) }) .Invoke(null, new object[] { Avx.LoadAlignedVector256((Int32*)(_dataTable.inArray0Ptr)), Avx.LoadAlignedVector256((Int16*)(_dataTable.inArray1Ptr)), Avx.LoadAlignedVector256((Int16*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Int32>)(result)); ValidateResult(_dataTable.inArray0Ptr, _dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AvxVnni.MultiplyWideningAndAdd( _clsVar0, _clsVar1, _clsVar2 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar0, _clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var first = Unsafe.Read<Vector256<Int32>>(_dataTable.inArray0Ptr); var second = Unsafe.Read<Vector256<Int16>>(_dataTable.inArray1Ptr); var third = Unsafe.Read<Vector256<Int16>>(_dataTable.inArray2Ptr); var result = AvxVnni.MultiplyWideningAndAdd(first, second, third); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(first, second, third, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var first= Avx.LoadVector256((Int32*)(_dataTable.inArray0Ptr)); var second = Avx.LoadVector256((Int16*)(_dataTable.inArray1Ptr)); var third = Avx.LoadVector256((Int16*)(_dataTable.inArray2Ptr)); var result = AvxVnni.MultiplyWideningAndAdd(first, second, third); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(first, second, third, _dataTable.outArrayPtr); } public void RunLclVarScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_LoadAligned)); var first = Avx.LoadAlignedVector256((Int32*)(_dataTable.inArray0Ptr)); var second = Avx.LoadAlignedVector256((Int16*)(_dataTable.inArray1Ptr)); var third = Avx.LoadAlignedVector256((Int16*)(_dataTable.inArray2Ptr)); var result = 
AvxVnni.MultiplyWideningAndAdd(first, second, third); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(first, second, third, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleTernaryOpTest__MultiplyWideningAndAddInt16(); var result = AvxVnni.MultiplyWideningAndAdd(test._fld0, test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld0, test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AvxVnni.MultiplyWideningAndAdd(_fld0, _fld1, _fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld0, _fld1, _fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AvxVnni.MultiplyWideningAndAdd(test._fld0, test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld0, test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector256<Int32> addend, Vector256<Int16> left, Vector256<Int16> right, void* result, [CallerMemberName] string method = "") { Int32[] inArray0 = new Int32[Op0ElementCount]; Int16[] inArray1 = new Int16[Op1ElementCount]; Int16[] inArray2 = new Int16[Op2ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray0[0]), addend); Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), left); Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray2[0]), right); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Int32>>()); ValidateResult(inArray0, inArray1, inArray2, outArray, method); } private void ValidateResult(void* addend, void* left, void* right, void* result, [CallerMemberName] string method = "") { Int32[] inArray0 = new Int32[Op0ElementCount]; Int16[] inArray1 = new Int16[Op1ElementCount]; Int16[] inArray2 = new Int16[Op2ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray0[0]), ref Unsafe.AsRef<byte>(addend), (uint)Unsafe.SizeOf<Vector256<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(left), (uint)Unsafe.SizeOf<Vector256<Int16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(right), (uint)Unsafe.SizeOf<Vector256<Int16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Int32>>()); ValidateResult(inArray0, inArray1, inArray2, outArray, method); } private void ValidateResult(Int32[] addend, Int16[] left, Int16[] right, Int32[] result, [CallerMemberName] string method = "") { bool succeeded = true; Int32[] outArray = new 
Int32[RetElementCount]; for (var i = 0; i < RetElementCount; i++) { outArray[i] = Math.Clamp((addend[i] + (right[i * 2 + 1] * left[i * 2 + 1] + right[i * 2] * left[i * 2])), int.MinValue, int.MaxValue); } for (var i = 0; i < RetElementCount; i++) { if (result[i] != outArray[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AvxVnni)}.{nameof(AvxVnni.MultiplyWideningAndAdd)}<Int32>(Vector256<Int32>, Vector256<Int32>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" addend: ({string.Join(", ", addend)})"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation($" valid: ({string.Join(", ", outArray)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
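The ValidateResult at the end of the test above encodes the per-lane semantics of AvxVnni.MultiplyWideningAndAdd for Int16 inputs: each 32-bit lane accumulates two adjacent 16x16 widening products into the addend. A standalone scalar sketch of that same reference computation, derived from the validation formula rather than from the intrinsic's implementation:

// dst[i] = addend[i] + left[2*i] * right[2*i] + left[2*i + 1] * right[2*i + 1]
static int[] MultiplyWideningAndAddScalar(int[] addend, short[] left, short[] right)
{
    var dst = new int[addend.Length];
    for (int i = 0; i < dst.Length; i++)
    {
        int lo = left[2 * i] * right[2 * i];           // 16 x 16 -> 32-bit widening product
        int hi = left[2 * i + 1] * right[2 * i + 1];
        dst[i] = addend[i] + lo + hi;                  // plain signed add; this variant does not saturate
    }
    return dst;
}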
-1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/libraries/System.ComponentModel.Primitives/src/System/ComponentModel/RefreshPropertiesAttribute.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics.CodeAnalysis; namespace System.ComponentModel { /// <summary> /// Specifies how a designer refreshes when the property value is changed. /// </summary> [AttributeUsage(AttributeTargets.All)] public sealed class RefreshPropertiesAttribute : Attribute { /// <summary> /// Indicates all properties should be refreshed if the property value is changed. /// This field is read-only. /// </summary> public static readonly RefreshPropertiesAttribute All = new RefreshPropertiesAttribute(RefreshProperties.All); /// <summary> /// Indicates all properties should be invalidated and repainted if the property /// value is changed. This field is read-only. /// </summary> public static readonly RefreshPropertiesAttribute Repaint = new RefreshPropertiesAttribute(RefreshProperties.Repaint); /// <summary> /// Indicates that by default no properties should be refreshed if the property /// value is changed. This field is read-only. /// </summary> public static readonly RefreshPropertiesAttribute Default = new RefreshPropertiesAttribute(RefreshProperties.None); public RefreshPropertiesAttribute(RefreshProperties refresh) { RefreshProperties = refresh; } /// <summary> /// Gets the refresh properties for the member. /// </summary> public RefreshProperties RefreshProperties { get; } public override bool Equals([NotNullWhen(true)] object? obj) => obj is RefreshPropertiesAttribute other && other.RefreshProperties == RefreshProperties; public override int GetHashCode() => base.GetHashCode(); public override bool IsDefaultAttribute() => Equals(Default); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics.CodeAnalysis; namespace System.ComponentModel { /// <summary> /// Specifies how a designer refreshes when the property value is changed. /// </summary> [AttributeUsage(AttributeTargets.All)] public sealed class RefreshPropertiesAttribute : Attribute { /// <summary> /// Indicates all properties should be refreshed if the property value is changed. /// This field is read-only. /// </summary> public static readonly RefreshPropertiesAttribute All = new RefreshPropertiesAttribute(RefreshProperties.All); /// <summary> /// Indicates all properties should be invalidated and repainted if the property /// value is changed. This field is read-only. /// </summary> public static readonly RefreshPropertiesAttribute Repaint = new RefreshPropertiesAttribute(RefreshProperties.Repaint); /// <summary> /// Indicates that by default no properties should be refreshed if the property /// value is changed. This field is read-only. /// </summary> public static readonly RefreshPropertiesAttribute Default = new RefreshPropertiesAttribute(RefreshProperties.None); public RefreshPropertiesAttribute(RefreshProperties refresh) { RefreshProperties = refresh; } /// <summary> /// Gets the refresh properties for the member. /// </summary> public RefreshProperties RefreshProperties { get; } public override bool Equals([NotNullWhen(true)] object? obj) => obj is RefreshPropertiesAttribute other && other.RefreshProperties == RefreshProperties; public override int GetHashCode() => base.GetHashCode(); public override bool IsDefaultAttribute() => Equals(Default); } }
-1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/libraries/System.Data.Common/src/System/Data/DataViewSetting.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.ComponentModel; using System.Diagnostics.CodeAnalysis; namespace System.Data { [TypeConverter((typeof(ExpandableObjectConverter)))] public class DataViewSetting { private DataViewManager? _dataViewManager; private DataTable? _table; private string _sort = string.Empty; private string _rowFilter = string.Empty; private DataViewRowState _rowStateFilter = DataViewRowState.CurrentRows; private bool _applyDefaultSort; internal DataViewSetting() { } public bool ApplyDefaultSort { get { return _applyDefaultSort; } set { if (_applyDefaultSort != value) { _applyDefaultSort = value; } } } [Browsable(false)] public DataViewManager? DataViewManager => _dataViewManager; internal void SetDataViewManager(DataViewManager dataViewManager) { if (_dataViewManager != dataViewManager) { _dataViewManager = dataViewManager; } } [Browsable(false)] public DataTable? Table => _table; internal void SetDataTable(DataTable table) { if (_table != table) { _table = table; } } [AllowNull] public string RowFilter { get { return _rowFilter; } [RequiresUnreferencedCode(Select.RequiresUnreferencedCodeMessage)] set { if (value == null) { value = string.Empty; } if (_rowFilter != value) { _rowFilter = value; } } } public DataViewRowState RowStateFilter { get { return _rowStateFilter; } set { if (_rowStateFilter != value) { _rowStateFilter = value; } } } [AllowNull] public string Sort { get { return _sort; } set { if (value == null) { value = string.Empty; } if (_sort != value) { _sort = value; } } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.ComponentModel; using System.Diagnostics.CodeAnalysis; namespace System.Data { [TypeConverter((typeof(ExpandableObjectConverter)))] public class DataViewSetting { private DataViewManager? _dataViewManager; private DataTable? _table; private string _sort = string.Empty; private string _rowFilter = string.Empty; private DataViewRowState _rowStateFilter = DataViewRowState.CurrentRows; private bool _applyDefaultSort; internal DataViewSetting() { } public bool ApplyDefaultSort { get { return _applyDefaultSort; } set { if (_applyDefaultSort != value) { _applyDefaultSort = value; } } } [Browsable(false)] public DataViewManager? DataViewManager => _dataViewManager; internal void SetDataViewManager(DataViewManager dataViewManager) { if (_dataViewManager != dataViewManager) { _dataViewManager = dataViewManager; } } [Browsable(false)] public DataTable? Table => _table; internal void SetDataTable(DataTable table) { if (_table != table) { _table = table; } } [AllowNull] public string RowFilter { get { return _rowFilter; } [RequiresUnreferencedCode(Select.RequiresUnreferencedCodeMessage)] set { if (value == null) { value = string.Empty; } if (_rowFilter != value) { _rowFilter = value; } } } public DataViewRowState RowStateFilter { get { return _rowStateFilter; } set { if (_rowStateFilter != value) { _rowStateFilter = value; } } } [AllowNull] public string Sort { get { return _sort; } set { if (value == null) { value = string.Empty; } if (_sort != value) { _sort = value; } } } } }
-1
dotnet/runtime
66,257
Cloning improvements
Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
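The description above refers to loops of the form `for (i = expression...` where the initializer is not a constant or a plain local variable. Below is a minimal, hypothetical C# sketch of such a loop shape; the class, method, and variable names are illustrative only and are not taken from the PR. It simply shows a counted loop whose induction variable starts at a computed expression, the kind of loop the cloning change described above can now also consider.
// Illustrative sketch only: a counted loop whose lower bound is a computed
// expression rather than a constant or a simple local variable.
using System;

public static class CloningExample
{
    // 'i' is initialized from 'data.Length / 2', i.e. an expression,
    // which is the initialization shape mentioned in the PR description.
    public static int SumUpperHalf(int[] data)
    {
        int sum = 0;
        for (int i = data.Length / 2; i < data.Length; i++)
        {
            sum += data[i];
        }
        return sum;
    }

    public static void Main()
    {
        Console.WriteLine(SumUpperHalf(new[] { 1, 2, 3, 4, 5, 6 })); // prints 15 (4 + 5 + 6)
    }
}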
BruceForstall
2022-03-06T02:30:47Z
2022-03-21T16:36:12Z
a1f26fbc29e645eda585fd3a4093311101521855
a1bf79e06b64edef0957a9cc907180c25fa8aab9
Cloning improvements. Remove loop cloning variable initialization condition: Assume that any pre-existing initialization is acceptable. Check condition against zero if necessary. Const inits remain as before. Lots of diffs due to more cloning for cases of `for (i = expression...` where `expression` is not just a constant or local var. Also, fix various comments that were no longer correct (e.g., "first" block concept is gone)
./src/tests/baseservices/threading/generics/TimerCallback/thread25.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System;
using System.Threading;

class Gen<T>
{
    public static void Target(object p)
    {
        if (Test_thread25.Xcounter >= Test_thread25.nThreads)
        {
            ManualResetEvent evt = (ManualResetEvent) p;
            evt.Set();
        }
        else
        {
            Interlocked.Increment(ref Test_thread25.Xcounter);
        }
    }

    public static void ThreadPoolTest()
    {
        ManualResetEvent evt = new ManualResetEvent(false);
        TimerCallback tcb = new TimerCallback(Gen<T>.Target);
        Timer timer = new Timer(tcb, evt, Test_thread25.delay, Test_thread25.period);
        evt.WaitOne();
        timer.Dispose();
        Test_thread25.Eval(Test_thread25.Xcounter >= Test_thread25.nThreads);
        Test_thread25.Xcounter = 0;
    }
}

public class Test_thread25
{
    public static int delay = 0;
    public static int period = 2;
    public static int nThreads = 5;
    public static int counter = 0;
    public static int Xcounter = 0;
    public static bool result = true;

    public static void Eval(bool exp)
    {
        counter++;
        if (!exp)
        {
            result = exp;
            Console.WriteLine("Test Failed at location: " + counter);
        }
    }

    public static int Main()
    {
        Gen<int>.ThreadPoolTest();
        Gen<double>.ThreadPoolTest();
        Gen<string>.ThreadPoolTest();
        Gen<object>.ThreadPoolTest();
        Gen<Guid>.ThreadPoolTest();
        Gen<int[]>.ThreadPoolTest();
        Gen<double[,]>.ThreadPoolTest();
        Gen<string[][][]>.ThreadPoolTest();
        Gen<object[,,,]>.ThreadPoolTest();
        Gen<Guid[][,,,][]>.ThreadPoolTest();

        if (result)
        {
            Console.WriteLine("Test Passed");
            return 100;
        }
        else
        {
            Console.WriteLine("Test Failed");
            return 1;
        }
    }
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System;
using System.Threading;

class Gen<T>
{
    public static void Target(object p)
    {
        if (Test_thread25.Xcounter >= Test_thread25.nThreads)
        {
            ManualResetEvent evt = (ManualResetEvent) p;
            evt.Set();
        }
        else
        {
            Interlocked.Increment(ref Test_thread25.Xcounter);
        }
    }

    public static void ThreadPoolTest()
    {
        ManualResetEvent evt = new ManualResetEvent(false);
        TimerCallback tcb = new TimerCallback(Gen<T>.Target);
        Timer timer = new Timer(tcb, evt, Test_thread25.delay, Test_thread25.period);
        evt.WaitOne();
        timer.Dispose();
        Test_thread25.Eval(Test_thread25.Xcounter >= Test_thread25.nThreads);
        Test_thread25.Xcounter = 0;
    }
}

public class Test_thread25
{
    public static int delay = 0;
    public static int period = 2;
    public static int nThreads = 5;
    public static int counter = 0;
    public static int Xcounter = 0;
    public static bool result = true;

    public static void Eval(bool exp)
    {
        counter++;
        if (!exp)
        {
            result = exp;
            Console.WriteLine("Test Failed at location: " + counter);
        }
    }

    public static int Main()
    {
        Gen<int>.ThreadPoolTest();
        Gen<double>.ThreadPoolTest();
        Gen<string>.ThreadPoolTest();
        Gen<object>.ThreadPoolTest();
        Gen<Guid>.ThreadPoolTest();
        Gen<int[]>.ThreadPoolTest();
        Gen<double[,]>.ThreadPoolTest();
        Gen<string[][][]>.ThreadPoolTest();
        Gen<object[,,,]>.ThreadPoolTest();
        Gen<Guid[][,,,][]>.ThreadPoolTest();

        if (result)
        {
            Console.WriteLine("Test Passed");
            return 100;
        }
        else
        {
            Console.WriteLine("Test Failed");
            return 1;
        }
    }
}
-1